diff --git a/pom.xml b/pom.xml
index 1abf738..ffa9bea 100644
--- a/pom.xml
+++ b/pom.xml
@@ -148,6 +148,7 @@
     <libfb303.version>0.9.2</libfb303.version>
     <libthrift.version>0.9.2</libthrift.version>
     <log4j.version>1.2.16</log4j.version>
+    <log4j2.version>2.3</log4j2.version>
     <log4j-extras.version>1.2.17</log4j-extras.version>
     <opencsv.version>2.3</opencsv.version>
     <mockito-all.version>1.9.5</mockito-all.version>
@@ -365,6 +366,21 @@
         <version>${log4j-extras.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-1.2-api</artifactId>
+        <version>${log4j2.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-slf4j-impl</artifactId>
+        <version>${log4j2.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-jcl</artifactId>
+        <version>${log4j2.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.antlr</groupId>
         <artifactId>antlr-runtime</artifactId>
         <version>${antlr.version}</version>
diff --git a/ql/pom.xml b/ql/pom.xml
index 6026c49..8d62035 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -122,6 +122,21 @@
       <version>${log4j-extras.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
      <groupId>org.antlr</groupId>
      <artifactId>antlr-runtime</artifactId>
      <version>${antlr.version}</version>
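A note on the three new artifacts: these are Log4j 2's standard bridge jars. log4j-1.2-api re-implements the org.apache.log4j 1.x API on top of the Log4j 2 core, log4j-slf4j-impl binds SLF4J to Log4j 2, and log4j-jcl does the same for commons-logging, so the many dependencies that still log through those APIs keep working. A minimal sketch of what the 1.2 bridge enables (illustrative only, not part of the patch; the class name is invented):

    // With log4j-1.2-api on the classpath, legacy Log4j 1.x API calls are
    // serviced by the Log4j 2 core, so un-migrated code keeps working.
    import org.apache.log4j.Logger;   // 1.x API, provided by the bridge jar

    public class BridgeCheck {
      public static void main(String[] args) {
        Logger log = Logger.getLogger(BridgeCheck.class);
        log.info("routed to Log4j 2 by the 1.2 bridge");
      }
    }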
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index a2cf712..82345ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -27,7 +27,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Enumeration;
 import java.util.List;
 import java.util.Properties;
 
@@ -57,13 +56,14 @@
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormatImpl;
 import org.apache.hadoop.hive.ql.io.IOPrepareCache;
+import org.apache.hadoop.hive.ql.log.NullAppender;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -88,11 +88,12 @@
 import org.apache.hadoop.mapred.Partitioner;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Appender;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.varia.NullAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.appender.FileAppender;
+import org.apache.logging.log4j.core.appender.RollingFileAppender;
 
 /**
  * ExecDriver is the central class in co-ordinating execution of any map-reduce task.
@@ -687,8 +688,10 @@ public static void main(String[] args) throws IOException, HiveException {
     if (noLog) {
       // If started from main(), and noLog is on, we should not output
       // any logs. To turn the log on, please set -Dtest.silent=false
-      BasicConfigurator.resetConfiguration();
-      BasicConfigurator.configure(new NullAppender());
+      Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
+      NullAppender appender = NullAppender.createNullAppender();
+      appender.addToLogger(logger.getName(), Level.ERROR);
+      appender.start();
     } else {
       setupChildLog4j(conf);
     }
@@ -703,10 +706,12 @@ public static void main(String[] args) throws IOException, HiveException {
 
     // print out the location of the log file for the user so
     // that it's easy to find reason for local mode execution failures
-    for (Appender appender : Collections.list((Enumeration<Appender>) LogManager.getRootLogger()
-        .getAllAppenders())) {
+    for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger())
+        .getAppenders().values()) {
       if (appender instanceof FileAppender) {
-        console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
+        console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
+      } else if (appender instanceof RollingFileAppender) {
+        console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
       }
     }
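The rewritten loop above leans on a Log4j 2 detail worth calling out: the public-API org.apache.logging.log4j.Logger has no getAllAppenders() equivalent, so the root logger is cast to the implementation class org.apache.logging.log4j.core.Logger, whose getAppenders() returns a map keyed by appender name. A standalone sketch of the same idiom (the class name is invented for illustration):

    import java.util.Map;

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.Appender;
    import org.apache.logging.log4j.core.Logger;

    public class AppenderLister {
      public static void main(String[] args) {
        // The cast is safe whenever log4j-core provides the active LoggerContext.
        Logger root = (Logger) LogManager.getRootLogger();
        Map<String, Appender> appenders = root.getAppenders();
        appenders.forEach((name, appender) ->
            System.out.println(name + " -> " + appender.getClass().getSimpleName()));
      }
    }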
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
new file mode 100644
index 0000000..97d9b6e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+/**
+ * A NullAppender merely exists; it never outputs a message to any device.
+ */
+public class NullAppender extends AbstractAppender {
+
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+
+  protected NullAppender(String name, Filter filter,
+      Layout<? extends Serializable> layout, boolean ignoreExceptions) {
+    super(name, filter, layout, ignoreExceptions);
+  }
+
+  public static NullAppender createNullAppender() {
+    return new NullAppender("NullAppender", null, PatternLayout.createDefaultLayout(), true);
+  }
+
+  public void addToLogger(String loggerName, Level level) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.addAppender(this, level, null);
+    context.updateLoggers();
+  }
+
+  public void append(LogEvent event) {
+    // no-op
+  }
+}
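Intended usage of NullAppender, mirroring the ExecDriver.main() change above (a sketch; the wrapper class is hypothetical):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;

    import org.apache.hadoop.hive.ql.log.NullAppender;

    public class QuietMode {
      public static void main(String[] args) {
        // Attach a started NullAppender to the root logger's configuration.
        // Events routed to it at ERROR or above are discarded; any other
        // appenders configured on the same logger still receive them.
        NullAppender appender = NullAppender.createNullAppender();
        appender.addToLogger(LogManager.getRootLogger().getName(), Level.ERROR);
        appender.start();
      }
    }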
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java
new file mode 100644
index 0000000..e3afcc9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/StringAppender.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.OutputStreamManager;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+/**
+ * Log4j2 appender that writes to an in-memory string object.
+ */
+public class StringAppender
+    extends AbstractOutputStreamAppender<StringAppender.StringOutputStreamManager> {
+
+  private static final String APPENDER_NAME = "StringAppender";
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+  private StringOutputStreamManager manager;
+
+  /**
+   * Instantiate a StringAppender and set the output destination to a
+   * new {@link OutputStreamWriter} initialized with os
+   * as its {@link OutputStream}.
+   *
+   * @param name             The name of the Appender.
+   * @param layout           The layout to format the message.
+   * @param filter           The filter to apply, if any.
+   * @param ignoreExceptions Whether append errors are ignored.
+   * @param immediateFlush   Whether to flush on every append.
+   * @param manager          The OutputStreamManager.
+   */
+  protected StringAppender(String name,
+      Layout<? extends Serializable> layout, Filter filter,
+      boolean ignoreExceptions, boolean immediateFlush,
+      StringOutputStreamManager manager) {
+    super(name, layout, filter, ignoreExceptions, immediateFlush, manager);
+    this.manager = manager;
+  }
+
+  public static StringAppender createStringAppender(String nullablePatternString) {
+    PatternLayout layout;
+    if (nullablePatternString == null) {
+      layout = PatternLayout.createDefaultLayout();
+    } else {
+      layout = PatternLayout.createLayout(nullablePatternString, configuration,
+          null, null, true, false, null, null);
+    }
+
+    return new StringAppender(APPENDER_NAME, layout, null, false, true,
+        new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", layout));
+  }
+
+  public void addToLogger(String loggerName, Level level) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.addAppender(this, level, null);
+    context.updateLoggers();
+  }
+
+  public void removeFromLogger(String loggerName) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.removeAppender(APPENDER_NAME);
+    context.updateLoggers();
+  }
+
+  public String getOutput() {
+    manager.flush();
+    return new String(manager.getStream().toByteArray());
+  }
+
+  public void reset() {
+    manager.reset();
+  }
+
+  protected static class StringOutputStreamManager extends OutputStreamManager {
+    ByteArrayOutputStream stream;
+
+    protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
+        Layout<?> layout) {
+      super(os, streamName, layout);
+      stream = os;
+    }
+
+    public ByteArrayOutputStream getStream() {
+      return stream;
+    }
+
+    public void reset() {
+      stream.reset();
+    }
+  }
+}
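Typical capture-and-assert usage, which the TestHive change below exercises (a sketch; the logger name and pattern are arbitrary, and addToLogger attaches to the nearest configured LoggerConfig, often the root one when the name has no explicit configuration):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    import org.apache.hadoop.hive.ql.log.StringAppender;

    public class CaptureExample {
      public static void main(String[] args) {
        Logger logger = LogManager.getLogger("capture.example");
        StringAppender appender = StringAppender.createStringAppender("%m%n");
        appender.addToLogger(logger.getName(), Level.INFO);
        appender.start();

        logger.error("captured in memory, not on disk");
        System.out.println(appender.getOutput());   // prints the captured message

        appender.reset();                           // clears the in-memory buffer
        appender.removeFromLogger(logger.getName());
      }
    }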
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 99fbd5d..8c51946 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -20,7 +20,6 @@
 
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
-import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -29,8 +28,6 @@
 import java.util.Map;
 import java.util.regex.Pattern;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -46,6 +43,7 @@
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.index.HiveIndex;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.log.StringAppender;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -56,15 +54,19 @@
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.junit.Assert;
 
 import com.google.common.collect.ImmutableMap;
 
+import junit.framework.TestCase;
+
 /**
  * TestHive.
  *
@@ -248,36 +250,39 @@ public void testThriftTable() throws Throwable {
    * @throws Throwable
    */
   public void testMetaStoreApiTiming() throws Throwable {
-    // set log level to DEBUG, as this is logged at debug level
-    Logger logger = Logger.getLogger("hive.ql.metadata.Hive");
-    Level origLevel = logger.getLevel();
-    logger.setLevel(Level.DEBUG);
-
-    // create an appender to capture the logs in a string
-    StringWriter writer = new StringWriter();
-    WriterAppender appender = new WriterAppender(new PatternLayout(), writer);
+    // Get the Hive metadata logger; without a log4j2 test configuration it
+    // falls back to the root LoggerConfig, which only logs ERRORs by default,
+    // so raise the level to DEBUG where the timing information is logged
+    Logger logger = LogManager.getLogger("hive.ql.metadata.Hive");
+    Level oldLevel = logger.getLevel();
+    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+    Configuration config = ctx.getConfiguration();
+    LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+    loggerConfig.setLevel(Level.DEBUG);
+    ctx.updateLoggers();
+
+    // Create a String Appender to capture log output
+    StringAppender appender = StringAppender.createStringAppender("%m");
+    appender.addToLogger(logger.getName(), Level.DEBUG);
+    appender.start();
 
     try {
-      logger.addAppender(appender);
-
       hm.clearMetaCallTiming();
       hm.getAllDatabases();
       hm.dumpAndClearMetaCallTiming("test");
-      String logStr = writer.toString();
+      String logStr = appender.getOutput();
       String expectedString = "getAllDatabases_()=";
       Assert.assertTrue(logStr + " should contain <" + expectedString,
           logStr.contains(expectedString));
 
       // reset the log buffer, verify new dump without any api call does not contain func
-      writer.getBuffer().setLength(0);
+      appender.reset();
       hm.dumpAndClearMetaCallTiming("test");
-      logStr = writer.toString();
+      logStr = appender.getOutput();
       Assert.assertFalse(logStr + " should not contain <" + expectedString,
           logStr.contains(expectedString));
-
     } finally {
-      logger.setLevel(origLevel);
-      logger.removeAppender(appender);
+      loggerConfig.setLevel(oldLevel);
+      ctx.updateLoggers();
+      appender.removeFromLogger(logger.getName());
     }
   }