commit c13d95a47d10083c60c1d6c68e22b30960b52b4b
Author: Andrew Sherman
Date:   Mon Oct 16 16:38:00 2017 -0700

    HIVE-17826: Error writing to RandomAccessFile after operation log is closed.

    In HIVE-17128 we prevented file descriptor leaks by closing the log4j
    Appender used for Operation Logs. In the bug report we see that sometimes
    logging is attempted even after the Operation is closed. The current
    appender, a RandomAccessFileAppender, will continue to try to write to its
    log file even after it has been stopped. To fix this, create a new class,
    HushableRandomAccessFileAppender, based on log4j's RandomAccessFileAppender:
    https://github.com/apache/logging-log4j2/blob/c02e7de69e81c174f1ea0be43639d9e231fa5283/log4j-core/src/main/java/org/apache/logging/log4j/core/appender/RandomAccessFileAppender.java
    Unfortunately that class is final and hard to extend by delegation, so the
    code is copied here. The only substantive change is that
    HushableRandomAccessFileAppender no longer appends after it has been
    stopped:

        if (isStopped()) {
          // Don't try to log anything when appender is stopped
          return;
        }

    Make Operation Logging use the HushableRandomAccessFileAppender.

    Add a test that writes to the appender after it has been stopped. (A
    standalone sketch of the same behavior follows the patch below.)

    Add a minimal LogEvent implementation for testing.

diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
index 3c30069adc04aea54587953159b951d1b0776705..8febe3e79ff892c54b696b6c6ef92f7026c46033 100644
--- itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
+++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
@@ -25,6 +25,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.log.HushableRandomAccessFileAppender;
 import org.apache.hadoop.hive.ql.log.LogDivertAppender;
 import org.apache.hadoop.hive.ql.log.LogDivertAppenderForTest;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
@@ -34,7 +35,9 @@ import org.apache.hive.service.cli.OperationHandle;
 import org.apache.hive.service.cli.RowSet;
 import org.apache.hive.service.cli.SessionHandle;
 
+import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.AbstractLogEvent;
 import org.apache.logging.log4j.core.Appender;
 import org.apache.logging.log4j.core.LoggerContext;
 import org.apache.logging.log4j.core.appender.routing.RoutingAppender;
@@ -152,6 +155,16 @@ private void checkAppenderState(String msg, String routingAppenderName, String q
     Appender appender = appenderControl.getAppender();
     Assert.assertNotNull(msg + "could not find Appender for query id " + queryId + " from AppenderControl " + appenderControl, appender);
     Assert.assertEquals(msg + "Appender for query is in unexpected state", expectedStopped, appender.isStopped());
+    Assert.assertTrue("Appender should be a HushableRandomAccessFileAppender", appender instanceof HushableRandomAccessFileAppender);
+    HushableRandomAccessFileAppender ra = (HushableRandomAccessFileAppender) appender;
+    // Even if the appender is stopped it should not throw an exception when we log
+    try {
+      ra.append(new LocalLogEvent());
+    } catch (Exception e) {
+      e.printStackTrace();
+      Assert.fail("Caught exception while logging to an appender of class " + ra.getClass()
+          + " with stopped=" + ra.isStopped());
+    }
   }
 
   private SessionHandle setupSession() throws Exception {
@@ -184,4 +197,17 @@ private SessionHandle setupSession() throws Exception {
 
     return sessionHandle;
   }
+
+  /**
+   * A minimal LogEvent implementation for testing
+   */
+  private static class LocalLogEvent extends AbstractLogEvent {
+
+    LocalLogEvent() {
+    }
+
+    @Override public Level getLevel() {
+      return Level.DEBUG;
+    }
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java
new file mode 100644
index 0000000000000000000000000000000000000000..639d1d8eab6068b99b5681e32a717780d5bb0407
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java
@@ -0,0 +1,191 @@
+package org.apache.hadoop.hive.ql.log;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache license, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license for the specific language governing permissions and
+ * limitations under the license.
+ */
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.RandomAccessFileManager;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
+import org.apache.logging.log4j.core.config.plugins.PluginConfiguration;
+import org.apache.logging.log4j.core.config.plugins.PluginElement;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+import org.apache.logging.log4j.core.net.Advertiser;
+import org.apache.logging.log4j.core.util.Booleans;
+import org.apache.logging.log4j.core.util.Integers;
+
+/**
+ * A File Appender that does not append log records after it has been stopped.
+ * Based on
+ * https://github.com/apache/logging-log4j2/blob/c02e7de69e81c174f1ea0be43639d9e231fa5283/log4j-core/src/main/java/org/apache/logging/log4j/core/appender/RandomAccessFileAppender.java
+ * (which is from log4j 2.6.2)
+ */
+@Plugin(name = "HushableRandomAccessFile", category = "Core", elementType = "appender", printObject = true)
+public final class HushableRandomAccessFileAppender extends
+    AbstractOutputStreamAppender<RandomAccessFileManager> {
+
+  private final String fileName;
+  private Object advertisement;
+  private final Advertiser advertiser;
+
+  private HushableRandomAccessFileAppender(final String name, final Layout<? extends Serializable> layout,
+      final Filter filter, final RandomAccessFileManager manager, final String filename,
+      final boolean ignoreExceptions, final boolean immediateFlush, final Advertiser advertiser) {
+
+    super(name, layout, filter, ignoreExceptions, immediateFlush, manager);
+    if (advertiser != null) {
+      final Map<String, String> configuration = new HashMap<>(
+          layout.getContentFormat());
+      configuration.putAll(manager.getContentFormat());
+      configuration.put("contentType", layout.getContentType());
+      configuration.put("name", name);
+      advertisement = advertiser.advertise(configuration);
+    }
+    this.fileName = filename;
+    this.advertiser = advertiser;
+  }
+
+  @Override
+  public void stop() {
+    super.stop();
+    if (advertiser != null) {
+      advertiser.unadvertise(advertisement);
+    }
+  }
+
+  /**
+   * Write the log entry rolling over the file when required.
+   *
+   * @param event The LogEvent.
+   */
+  @Override
+  public void append(final LogEvent event) {
+    // Unlike RandomAccessFileAppender, do not append log when stopped.
+    if (isStopped()) {
+      // Don't try to log anything when appender is stopped
+      return;
+    }
+
+    // Leverage the nice batching behaviour of async Loggers/Appenders:
+    // we can signal the file manager that it needs to flush the buffer
+    // to disk at the end of a batch.
+    // From a user's point of view, this means that all log events are
+    // _always_ available in the log file, without incurring the overhead
+    // of immediateFlush=true.
+    getManager().setEndOfBatch(event.isEndOfBatch()); // FIXME manager's EndOfBatch threadlocal can be deleted
+
+    // LOG4J2-1292 utilize gc-free Layout.encode() method: taken care of in superclass
+    super.append(event);
+  }
+
+  /**
+   * Returns the file name this appender is associated with.
+   *
+   * @return The File name.
+   */
+  public String getFileName() {
+    return this.fileName;
+  }
+
+  /**
+   * Returns the size of the file manager's buffer.
+   * @return the buffer size
+   */
+  public int getBufferSize() {
+    return getManager().getBufferSize();
+  }
+
+  // difference from standard File Appender:
+  // locking is not supported and buffering cannot be switched off
+  /**
+   * Create a File Appender.
+   *
+   * @param fileName The name and path of the file.
+   * @param append "True" if the file should be appended to, "false" if it
+   *          should be overwritten. The default is "true".
+   * @param name The name of the Appender.
+   * @param immediateFlush "true" if the contents should be flushed on every
+   *          write, "false" otherwise. The default is "true".
+   * @param bufferSizeStr The buffer size, defaults to {@value RandomAccessFileManager#DEFAULT_BUFFER_SIZE}.
+   * @param ignore If {@code "true"} (default) exceptions encountered when appending events are logged; otherwise
+   *          they are propagated to the caller.
+   * @param layout The layout to use to format the event. If no layout is
+   *          provided the default PatternLayout will be used.
+   * @param filter The filter, if any, to use.
+   * @param advertise "true" if the appender configuration should be
+   *          advertised, "false" otherwise.
+   * @param advertiseURI The advertised URI which can be used to retrieve the
+   *          file contents.
+   * @param config The Configuration.
+   * @return The FileAppender.
+   */
+  @PluginFactory
+  public static HushableRandomAccessFileAppender createAppender(
+      @PluginAttribute("fileName") final String fileName,
+      @PluginAttribute("append") final String append,
+      @PluginAttribute("name") final String name,
+      @PluginAttribute("immediateFlush") final String immediateFlush,
+      @PluginAttribute("bufferSize") final String bufferSizeStr,
+      @PluginAttribute("ignoreExceptions") final String ignore,
+      @PluginElement("Layout") Layout<? extends Serializable> layout,
+      @PluginElement("Filter") final Filter filter,
+      @PluginAttribute("advertise") final String advertise,
+      @PluginAttribute("advertiseURI") final String advertiseURI,
+      @PluginConfiguration final Configuration config) {
+
+    final boolean isAppend = Booleans.parseBoolean(append, true);
+    final boolean isFlush = Booleans.parseBoolean(immediateFlush, true);
+    final boolean ignoreExceptions = Booleans.parseBoolean(ignore, true);
+    final boolean isAdvertise = Boolean.parseBoolean(advertise);
+    final int bufferSize = Integers.parseInt(bufferSizeStr, 256 * 1024 /* RandomAccessFileManager.DEFAULT_BUFFER_SIZE */);
+
+    if (name == null) {
+      LOGGER.error("No name provided for FileAppender");
+      return null;
+    }
+
+    if (fileName == null) {
+      LOGGER.error("No filename provided for FileAppender with name "
+          + name);
+      return null;
+    }
+    if (layout == null) {
+      layout = PatternLayout.createDefaultLayout();
+    }
+    final RandomAccessFileManager manager = RandomAccessFileManager.getFileManager(
+        fileName, isAppend, isFlush, bufferSize, advertiseURI, layout
+        // , config -- needed in later log4j versions
+    );
+    if (manager == null) {
+      return null;
+    }
+
+    return new HushableRandomAccessFileAppender(
+        name, layout, filter, manager, fileName, ignoreExceptions, isFlush,
+        isAdvertise ? config.getAdvertiser() : null
+    );
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
index c6a5341007b566fe1c87c6c6e41462c21480ff3e..b5e8b957440efe57ffb202616bcee6ada54320de 100644
--- ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
+++ ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.log;
 
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.Map;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hive.common.LogUtils;
@@ -28,14 +25,11 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.session.OperationLog;
 
 import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.core.Appender;
 import org.apache.logging.log4j.core.LogEvent;
 import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.logging.log4j.core.appender.RandomAccessFileAppender;
 import org.apache.logging.log4j.core.appender.routing.Route;
 import org.apache.logging.log4j.core.appender.routing.Routes;
 import org.apache.logging.log4j.core.appender.routing.RoutingAppender;
-import org.apache.logging.log4j.core.config.AppenderControl;
 import org.apache.logging.log4j.core.config.Configuration;
 import org.apache.logging.log4j.core.config.LoggerConfig;
 import org.apache.logging.log4j.core.config.Node;
@@ -208,11 +202,11 @@ public static void registerRoutingAppender(org.apache.hadoop.conf.Configuration
     Node node = new Node(null, "Route", type);
 
     PluginEntry childEntry = new PluginEntry();
-    childEntry.setClassName(RandomAccessFileAppender.class.getName());
-    childEntry.setKey("randomaccessfile");
+    childEntry.setClassName(HushableRandomAccessFileAppender.class.getName());
+    childEntry.setKey("HushableMutableRandomAccess");
     childEntry.setName("appender");
-    PluginType childType = new PluginType(childEntry, RandomAccessFileAppender.class, "appender");
-    Node childNode = new Node(node, "RandomAccessFile", childType);
+    PluginType childType = new PluginType<>(childEntry, HushableRandomAccessFileAppender.class, "appender");
+    Node childNode = new Node(node, "HushableMutableRandomAccess", childType);
     childNode.getAttributes().put("name", "query-file-appender");
     childNode.getAttributes().put("fileName", logLocation + "/${ctx:sessionId}/${ctx:queryId}");
     node.getChildren().add(childNode);
@@ -221,7 +215,7 @@ public static void registerRoutingAppender(org.apache.hadoop.conf.Configuration
     filterEntry.setClassName(NameFilter.class.getName());
     filterEntry.setKey("namefilter");
     filterEntry.setName("namefilter");
-    PluginType filterType = new PluginType(filterEntry, NameFilter.class, "filter");
+    PluginType filterType = new PluginType<>(filterEntry, NameFilter.class, "filter");
     Node filterNode = new Node(childNode, "NameFilter", filterType);
     filterNode.getAttributes().put("loggingLevel", loggingMode.name());
     childNode.getChildren().add(filterNode);
@@ -230,7 +224,7 @@ public static void registerRoutingAppender(org.apache.hadoop.conf.Configuration
     layoutEntry.setClassName(PatternLayout.class.getName());
     layoutEntry.setKey("patternlayout");
     layoutEntry.setName("layout");
-    PluginType layoutType = new PluginType(layoutEntry, PatternLayout.class, "layout");
+    PluginType layoutType = new PluginType<>(layoutEntry, PatternLayout.class, "layout");
     Node layoutNode = new Node(childNode, "PatternLayout", layoutType);
     layoutNode.getAttributes().put("pattern", layout);
     childNode.getChildren().add(layoutNode);
diff --git ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
index e8da18bc66db14267c0bf35f342b2c7213bfd6e7..6c6178681dccc29c8b8b96be49bcd52ff15e792b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
+++ ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
@@ -22,7 +22,6 @@ import org.apache.logging.log4j.LogManager;
 
 import org.apache.logging.log4j.core.LogEvent;
 import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.logging.log4j.core.appender.RandomAccessFileAppender;
 import org.apache.logging.log4j.core.appender.routing.Route;
 import org.apache.logging.log4j.core.appender.routing.Routes;
 import org.apache.logging.log4j.core.appender.routing.RoutingAppender;
@@ -115,13 +114,13 @@ public static void registerRoutingAppenderIfInTest(org.apache.hadoop.conf.Config
     PluginEntry nullAppenderEntry = new PluginEntry();
     nullAppenderEntry.setClassName(NullAppender.class.getName());
     PluginType nullAppenderType =
-        new PluginType(nullAppenderEntry, NullAppender.class, "appender");
+        new PluginType<>(nullAppenderEntry, NullAppender.class, "appender");
     Node nullAppenderChildNode = new Node(null, "test-null-appender", nullAppenderType);
 
     // Create default route where events go without queryId
     PluginEntry defaultRouteEntry = new PluginEntry();
     defaultRouteEntry.setClassName(Route.class.getName());
-    PluginType defaultRouteType = new PluginType(defaultRouteEntry, Route.class, "");
+    PluginType defaultRouteType = new PluginType<>(defaultRouteEntry, Route.class, "");
     Node defaultRouteNode = new Node(null, "test-route-default", defaultRouteType);
     // Add the test-null-appender to the default route
     defaultRouteNode.getChildren().add(nullAppenderChildNode);
@@ -129,15 +128,15 @@ public static void registerRoutingAppenderIfInTest(org.apache.hadoop.conf.Config
     // Create queryId based route
     PluginEntry queryIdRouteEntry = new PluginEntry();
     queryIdRouteEntry.setClassName(Route.class.getName());
-    PluginType queryIdRouteType = new PluginType(queryIdRouteEntry, Route.class, "");
+    PluginType queryIdRouteType = new PluginType<>(queryIdRouteEntry, Route.class, "");
     Node queryIdRouteNode = new Node(null, "test-route-mdc", queryIdRouteType);
 
     // Create the queryId appender for the queryId route
     PluginEntry queryIdAppenderEntry = new PluginEntry();
-    queryIdAppenderEntry.setClassName(RandomAccessFileAppender.class.getName());
-    PluginType queryIdAppenderType =
-        new PluginType(queryIdAppenderEntry,
-            RandomAccessFileAppender.class, "appender");
+    queryIdAppenderEntry.setClassName(HushableRandomAccessFileAppender.class.getName());
+    PluginType queryIdAppenderType =
+        new PluginType<>(queryIdAppenderEntry,
+            HushableRandomAccessFileAppender.class, "appender");
     Node queryIdAppenderNode = new Node(queryIdRouteNode, "test-query-file-appender",
         queryIdAppenderType);
     queryIdAppenderNode.getAttributes().put("fileName", logLocation
@@ -150,7 +149,7 @@ public static void registerRoutingAppenderIfInTest(org.apache.hadoop.conf.Config
     PluginEntry filterEntry = new PluginEntry();
     filterEntry.setClassName(TestFilter.class.getName());
     PluginType filterType =
-        new PluginType(filterEntry, TestFilter.class, "");
+        new PluginType<>(filterEntry, TestFilter.class, "");
     Node filterNode = new Node(queryIdAppenderNode, "test-filter", filterType);
     // Add the filter to the queryId appender
     queryIdAppenderNode.getChildren().add(filterNode);
@@ -159,7 +158,7 @@ public static void registerRoutingAppenderIfInTest(org.apache.hadoop.conf.Config
     PluginEntry layoutEntry = new PluginEntry();
     layoutEntry.setClassName(PatternLayout.class.getName());
     PluginType layoutType =
-        new PluginType(layoutEntry, PatternLayout.class, "");
+        new PluginType<>(layoutEntry, PatternLayout.class, "");
     Node layoutNode = new Node(queryIdAppenderNode, "PatternLayout", layoutType);
     layoutNode.getAttributes().put("pattern", LogDivertAppender.nonVerboseLayout);
     // Add the layout to the queryId appender
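
As an illustration (not part of the patch), here is a minimal, hypothetical sketch of the fixed behavior: an appender is built through the plugin factory shown above, stopped, and then asked to append. The class name HushedAppendSketch, the DummyEvent helper, and the log file path are invented for this sketch; it assumes the patched Hive classes and log4j 2.6.x on the classpath.

import org.apache.hadoop.hive.ql.log.HushableRandomAccessFileAppender;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.AbstractLogEvent;

public class HushedAppendSketch {

  /** A minimal LogEvent, mirroring the LocalLogEvent used in the test above. */
  private static class DummyEvent extends AbstractLogEvent {
    @Override
    public Level getLevel() {
      return Level.DEBUG;
    }
  }

  public static void main(String[] args) {
    // Null attributes fall back to the defaults documented in createAppender().
    HushableRandomAccessFileAppender appender =
        HushableRandomAccessFileAppender.createAppender(
            "/tmp/hushable-sketch.log", // fileName (illustrative path)
            null,               // append           -> defaults to true
            "sketch-appender",  // name             (required)
            null,               // immediateFlush   -> defaults to true
            null,               // bufferSize       -> defaults to 256 * 1024
            null,               // ignoreExceptions -> defaults to true
            null,               // layout           -> default PatternLayout
            null,               // filter
            null,               // advertise        -> defaults to false
            null,               // advertiseURI
            null);              // config           (unused when advertise is false)
    appender.start();
    appender.stop();

    // Before this fix, a stopped RandomAccessFileAppender would still try to
    // write to its (closed) file, producing the error in the bug report.
    // The hushable appender hits the isStopped() guard and returns silently.
    appender.append(new DummyEvent());
  }
}

Like the new assertion in TestOperationLoggingLayout, this relies only on the guard at the top of append(): once isStopped() is true, append() is a no-op rather than a write to a closed file.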