diff --git a/pom.xml b/pom.xml
index 1abf738..ffa9bea 100644
--- a/pom.xml
+++ b/pom.xml
@@ -148,6 +148,7 @@
0.9.2
0.9.2
1.2.16
+    <log4j2.version>2.3</log4j2.version>
1.2.17
2.3
1.9.5
@@ -365,6 +366,21 @@
${log4j-extras.version}
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
org.antlr
antlr-runtime
${antlr.version}
diff --git a/ql/pom.xml b/ql/pom.xml
index 6026c49..8d62035 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -122,6 +122,21 @@
${log4j-extras.version}
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
org.antlr
antlr-runtime
${antlr.version}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
new file mode 100644
index 0000000..448527b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.Serializable;
+
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+/**
+ * A NullAppender merely exists, it never outputs a message to any device.
+ */
+public class NullAppender extends AbstractAppender {
+
+ protected NullAppender(String name, Filter filter,
+ Layout extends Serializable> layout, boolean ignoreExceptions) {
+ super(name, filter, layout, ignoreExceptions);
+ }
+
+ public static NullAppender createNullAppender() {
+ return new NullAppender("NullAppender", null, PatternLayout.createDefaultLayout(), true);
+ }
+
+ public void append(LogEvent event) {
+ // no-op
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java
new file mode 100644
index 0000000..e3afcc9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.OutputStreamManager;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+/**
+ * Log4j2 appender that writers to in-memory string object.
+ */
+public class StringAppender
+ extends AbstractOutputStreamAppender {
+
+ private static final String APPENDER_NAME = "StringAppender";
+ private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+ private static Configuration configuration = context.getConfiguration();
+ private StringOutputStreamManager manager;
+
+ /**
+ * Instantiate a WriterAppender and set the output destination to a
+ * new {@link OutputStreamWriter} initialized with os
+ * as its {@link OutputStream}.
+ *
+ * @param name The name of the Appender.
+ * @param layout The layout to format the message.
+ * @param filter
+ * @param ignoreExceptions
+ * @param immediateFlush
+ * @param manager The OutputStreamManager.
+ */
+ protected StringAppender(String name,
+ Layout extends Serializable> layout, Filter filter,
+ boolean ignoreExceptions, boolean immediateFlush,
+ StringOutputStreamManager manager) {
+ super(name, layout, filter, ignoreExceptions, immediateFlush, manager);
+ this.manager = manager;
+ }
+
+ public static StringAppender createStringAppender(String nullablePatternString) {
+ PatternLayout layout;
+ if (nullablePatternString == null) {
+ layout = PatternLayout.createDefaultLayout();
+ } else {
+ layout = PatternLayout.createLayout(nullablePatternString, configuration,
+ null, null, true, false, null, null);
+ }
+
+ return new StringAppender(APPENDER_NAME, layout, null, false, true,
+ new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", layout));
+ }
+
+ public void addToLogger(String loggerName, Level level) {
+ LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+ loggerConfig.addAppender(this, level, null);
+ context.updateLoggers();
+ }
+
+ public void removeFromLogger(String loggerName) {
+ LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+ loggerConfig.removeAppender(APPENDER_NAME);
+ context.updateLoggers();
+ }
+
+ public String getOutput() {
+ manager.flush();
+ return new String(manager.getStream().toByteArray());
+ }
+
+ public void reset() {
+ manager.reset();
+ }
+
+ protected static class StringOutputStreamManager extends OutputStreamManager {
+ ByteArrayOutputStream stream;
+
+ protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
+ Layout> layout) {
+ super(os, streamName, layout);
+ stream = os;
+ }
+
+ public ByteArrayOutputStream getStream() {
+ return stream;
+ }
+
+ public void reset() {
+ stream.reset();
+ }
+ }
+}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 99fbd5d..8c51946 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -20,7 +20,6 @@
import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
-import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -29,8 +28,6 @@
import java.util.Map;
import java.util.regex.Pattern;
-import junit.framework.TestCase;
-
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -46,6 +43,7 @@
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.index.HiveIndex;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.log.StringAppender;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -56,15 +54,19 @@
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.junit.Assert;
import com.google.common.collect.ImmutableMap;
+import junit.framework.TestCase;
+
/**
* TestHive.
*
@@ -248,36 +250,39 @@ public void testThriftTable() throws Throwable {
* @throws Throwable
*/
public void testMetaStoreApiTiming() throws Throwable {
- // set log level to DEBUG, as this is logged at debug level
- Logger logger = Logger.getLogger("hive.ql.metadata.Hive");
- Level origLevel = logger.getLevel();
- logger.setLevel(Level.DEBUG);
-
- // create an appender to capture the logs in a string
- StringWriter writer = new StringWriter();
- WriterAppender appender = new WriterAppender(new PatternLayout(), writer);
+ // Get the RootLogger which, if you don't have log4j2-test.xml defined, will only log ERRORs
+ Logger logger = LogManager.getLogger("hive.ql.metadata.Hive");
+ Level oldLevel = logger.getLevel();
+ LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ Configuration config = ctx.getConfiguration();
+ LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+ loggerConfig.setLevel(Level.DEBUG);
+ ctx.updateLoggers();
+
+ // Create a String Appender to capture log output
+ StringAppender appender = StringAppender.createStringAppender("%m");
+ appender.addToLogger(logger.getName(), Level.DEBUG);
+ appender.start();
try {
- logger.addAppender(appender);
-
hm.clearMetaCallTiming();
hm.getAllDatabases();
hm.dumpAndClearMetaCallTiming("test");
- String logStr = writer.toString();
+ String logStr = appender.getOutput();
String expectedString = "getAllDatabases_()=";
Assert.assertTrue(logStr + " should contain <" + expectedString,
logStr.contains(expectedString));
// reset the log buffer, verify new dump without any api call does not contain func
- writer.getBuffer().setLength(0);
+ appender.reset();
hm.dumpAndClearMetaCallTiming("test");
- logStr = writer.toString();
+ logStr = appender.getOutput();
Assert.assertFalse(logStr + " should not contain <" + expectedString,
logStr.contains(expectedString));
-
} finally {
- logger.setLevel(origLevel);
- logger.removeAppender(appender);
+ loggerConfig.setLevel(oldLevel);
+ ctx.updateLoggers();
+ appender.removeFromLogger(logger.getName());
}
}