diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index b15928c..2b139f1 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -26,6 +26,8 @@ import java.util.BitSet; import java.util.List; +import javax.security.auth.login.LoginException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -39,6 +41,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; @@ -501,10 +504,10 @@ public static boolean mkdir(FileSystem fs, Path f, boolean inheritPerms, Configu * Copies files between filesystems. */ public static boolean copy(FileSystem srcFS, Path src, - FileSystem dstFS, Path dst, - boolean deleteSource, - boolean overwrite, - HiveConf conf) throws IOException { + FileSystem dstFS, Path dst, + boolean deleteSource, + boolean overwrite, + HiveConf conf) throws IOException { boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf); boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); if (copied && inheritPerms) { @@ -524,4 +527,25 @@ public static boolean copy(FileSystem srcFS, Path src, } return copied; } + + /** + * Returns the HDFS base scratch dir path string. + * When the query is run by HiveServer2 and doAs is set to true, + * the scratch dir is owned by the connecting user, + * else it is owned by the user running the HiveServer2 process. 
+ * @param conf the configuration used to determine the current user + * @return the per-user HDFS base scratch directory path + * @throws RuntimeException if the current user cannot be determined + */ + public static String getHDFSBaseScratchDir(Configuration conf) { + UserGroupInformation currentUGI; + try { + currentUGI = ShimLoader.getHadoopShims().getUGIForConf(conf); + } catch (Exception e) { + throw new RuntimeException("Unable to get current user", e); + } + return HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPREFIX) + + currentUGI.getShortUserName(); + } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index dcfe29a..cdd825b 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -178,8 +178,11 @@ SCRIPTWRAPPER("hive.exec.script.wrapper", null), PLAN("hive.exec.plan", ""), PLAN_SERIALIZATION("hive.plan.serialization.format","kryo"), - SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive-" + System.getProperty("user.name")), + SCRATCHDIRPREFIX("hive.exec.scratchdir.prefix", "/tmp/hive-"), + SCRATCHDIR("hive.exec.scratchdir", "${hive.exec.scratchdir.prefix}" + System.getProperty("user.name")), LOCALSCRATCHDIR("hive.exec.local.scratchdir", System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name")), + DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir", + System.getProperty("java.io.tmpdir") + File.separator + "${hive.session.id}_resources"), SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700"), SUBMITVIACHILD("hive.exec.submitviachild", false), SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000), @@ -205,8 +208,6 @@ DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000), DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100), MAXCREATEDFILES("hive.exec.max.created.files", 100000L), - DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir", - System.getProperty("java.io.tmpdir") + File.separator + "${hive.session.id}_resources"), DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__"), DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__"), // Whether to show a link to the most failed task + debugging tips diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index abc4290..3eaf02f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -18,6 +18,18 @@ package org.apache.hadoop.hive.ql; +import java.io.DataInput; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; + import org.antlr.runtime.TokenRewriteStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -30,32 +42,15 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.TaskRunner; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import 
org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; -import java.io.DataInput; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URI; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; - -import javax.security.auth.login.LoginException; - /** * Context for Semantic Analyzers. Usage: not reusable - construct a new one for * each query should call clear() at end of use to remove temporary folders @@ -124,11 +119,9 @@ public Context(Configuration conf, String executionId) { // local & non-local tmp location is configurable. however it is the same across // all external file systems - nonLocalScratchPath = - new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), - executionId); + nonLocalScratchPath = new Path(FileUtils.getHDFSBaseScratchDir(conf), executionId); localScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), - executionId).toUri().getPath(); + executionId).toUri().getPath(); scratchDirPermission= HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); } @@ -197,7 +190,7 @@ public String getCmd () { * @param scratchDir path of tmp directory */ private Path getScratchDir(String scheme, String authority, - boolean mkdir, String scratchDir) { + boolean mkdir, String scratchDir) { String fileSystem = scheme + ":" + authority; Path dir = fsScratchDirs.get(fileSystem + "-" + TaskRunner.getTaskRunnerID()); @@ -209,11 +202,12 @@ private Path getScratchDir(String scheme, String authority, try { FileSystem fs = dirPath.getFileSystem(conf); dirPath = new Path(fs.makeQualified(dirPath).toString()); - FsPermission fsPermission = new FsPermission(Short.parseShort(scratchDirPermission.trim(), 8)); - - if (!Utilities.createDirsWithPermission(conf, dirPath, fsPermission)) { - throw new RuntimeException("Cannot make directory: " - + dirPath.toString()); + if (!fs.mkdirs(dirPath)) { + throw new RuntimeException("Cannot make directory: " + dirPath.toString()); + } + else { + FsPermission fsPermission = new FsPermission(Short.parseShort(scratchDirPermission.trim(), 8)); + fs.setPermission(dirPath, fsPermission); } if (isHDFSCleanup) { fs.deleteOnExit(dirPath); @@ -239,7 +233,7 @@ public Path getLocalScratchDir(boolean mkdir) { FileSystem fs = FileSystem.getLocal(conf); URI uri = fs.getUri(); return getScratchDir(uri.getScheme(), uri.getAuthority(), - mkdir, localScratchDir); + mkdir, localScratchDir); } catch (IOException e) { throw new RuntimeException (e); } @@ -263,7 +257,7 @@ public Path getMRScratchDir() { URI uri = dir.toUri(); Path newScratchDir = getScratchDir(uri.getScheme(), uri.getAuthority(), - !explain, uri.getPath()); + !explain, uri.getPath()); LOG.info("New scratch dir is " + newScratchDir); return newScratchDir; } catch (IOException e) { @@ -276,7 +270,7 @@ public Path getMRScratchDir() { private Path getExternalScratchDir(URI extURI) { return getScratchDir(extURI.getScheme(), extURI.getAuthority(), - !explain, nonLocalScratchPath.toUri().getPath()); + !explain, nonLocalScratchPath.toUri().getPath()); } /** @@ -289,7 +283,7 @@ private void removeScratchDir() { p.getFileSystem(conf).delete(p, true); } catch (Exception e) { LOG.warn("Error Removing Scratch: " - + 
StringUtils.stringifyException(e)); + + StringUtils.stringifyException(e)); } } fsScratchDirs.clear(); @@ -311,7 +305,7 @@ private String nextPathId() { */ public boolean isMRTmpFileURI(String uriStr) { return (uriStr.indexOf(executionId) != -1) && - (uriStr.indexOf(MR_PREFIX) != -1); + (uriStr.indexOf(MR_PREFIX) != -1); } /** @@ -321,7 +315,7 @@ public boolean isMRTmpFileURI(String uriStr) { */ public Path getMRTmpPath() { return new Path(getMRScratchDir(), MR_PREFIX + - nextPathId()); + nextPathId()); } /** @@ -342,7 +336,7 @@ public Path getLocalTmpPath() { */ public Path getExternalTmpPath(URI extURI) { return new Path(getExternalScratchDir(extURI), EXT_PREFIX + - nextPathId()); + nextPathId()); } /** @@ -351,8 +345,8 @@ public Path getExternalTmpPath(URI extURI) { * path within /tmp */ public Path getExtTmpPathRelTo(URI uri) { - return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain, - uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId()); + return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain, + uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId()); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 7c175aa..7cbb9f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -38,13 +38,11 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -116,7 +114,6 @@ import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; @@ -410,7 +407,7 @@ public int execute(DriverContext driverContext) { return showTxns(txnsDesc); } - LockTableDesc lockTbl = work.getLockTblDesc(); + LockTableDesc lockTbl = work.getLockTblDesc(); if (lockTbl != null) { return lockTable(lockTbl); } @@ -1075,7 +1072,7 @@ private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException { params.putAll(newParams); database.setParameters(params); } else { // if one of them is null, replace the old params with the new - // one + // one database.setParameters(newParams); } } @@ -1271,7 +1268,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD assert(tbl.isPartitioned()); List newPartitionKeys = new ArrayList(); - + //Check if the existing partition values can be type casted to the new column type // with a non null value before trying to alter the partition column type. 
try { @@ -1283,19 +1280,19 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD break; } } - + if (colIndex == -1 || colIndex == tbl.getTTable().getPartitionKeys().size()) { - throw new HiveException("Cannot find partition column " + + throw new HiveException("Cannot find partition column " + alterPartitionDesc.getPartKeySpec().getName()); } - + TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType()); ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); Converter converter = ObjectInspectorConverters.getConverter( - PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); - + PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); + // For all the existing partitions, check if the value can be type casted to a non-null object for(Partition part : partitions) { if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { @@ -1304,23 +1301,23 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD try { String value = part.getValues().get(colIndex); Object convertedValue = - converter.convert(value); + converter.convert(value); if (convertedValue == null) { throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + - expectedType + " for value : " + value + " resulted in NULL object"); + expectedType + " for value : " + value + " resulted in NULL object"); } } catch (Exception e) { - throw new HiveException("Exception while converting " + + throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + " for value : " + part.getValues().get(colIndex)); - } + } } } catch(Exception e) { throw new HiveException( "Exception while checking type conversion of existing partition values to " + - alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage()); + alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage()); } - + for(FieldSchema col : tbl.getTTable().getPartitionKeys()) { if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) { newPartitionKeys.add(alterPartitionDesc.getPartKeySpec()); @@ -2575,7 +2572,7 @@ public int showColumns(Hive db, ShowColumnsDesc showCols) cols.addAll(table.getPartCols()); // In case the query is served by HiveServer2, don't pad it with spaces, // as HiveServer2 output is consumed by JDBC/ODBC clients. - boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + boolean isOutputPadded = !SessionState.isHiveServerQuery(); outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation( cols, false, isOutputPadded)); outStream.close(); @@ -2924,7 +2921,7 @@ private int showTxns(ShowTxnsDesc desc) throws HiveException { return 0; } - /** + /** * Lock the table/partition specified * * @param lockTbl @@ -3406,9 +3403,9 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? 
tbl.getCols() : part.getCols(); - if (!descTbl.isFormatted()) { - cols.addAll(tbl.getPartCols()); - } + if (!descTbl.isFormatted()) { + cols.addAll(tbl.getPartCols()); + } } else { cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer()); } @@ -3416,7 +3413,7 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { fixDecimalColumnTypeName(cols); // In case the query is served by HiveServer2, don't pad it with spaces, // as HiveServer2 output is consumed by JDBC/ODBC clients. - boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + boolean isOutputPadded = !SessionState.isHiveServerQuery(); formatter.describeTable(outStream, colPath, tableName, tbl, part, cols, descTbl.isFormatted(), descTbl.isExt(), descTbl.isPretty(), isOutputPadded); @@ -3840,7 +3837,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { } tbl.setNumBuckets(alterTbl.getNumberBuckets()); } - } else { + } else { throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 7250432..280af8b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -95,7 +95,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.HiveInterruptCallback; import org.apache.hadoop.hive.common.HiveInterruptUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; @@ -414,7 +413,7 @@ public static void setWorkflowAdjacencies(Configuration conf, QueryPlan plan) { @Override protected Expression instantiate(Object oldInstance, Encoder out) { return new Expression(Enum.class, "valueOf", new Object[] {oldInstance.getClass(), - ((Enum) oldInstance).name()}); + ((Enum) oldInstance).name()}); } @Override @@ -758,11 +757,11 @@ public void write(Kryo kryo, Output output, Timestamp ts) { } } - /** Custom Kryo serializer for sql date, otherwise Kryo gets confused between + /** Custom Kryo serializer for sql date, otherwise Kryo gets confused between java.sql.Date and java.util.Date while deserializing */ private static class SqlDateSerializer extends - com.esotericsoftware.kryo.Serializer { + com.esotericsoftware.kryo.Serializer { @Override public java.sql.Date read(Kryo kryo, Input input, Class clazz) { @@ -782,7 +781,7 @@ public CommonToken read(Kryo kryo, Input input, Class clazz) { } @Override - public void write(Kryo kryo, Output output, CommonToken token) { + public void write(Kryo kryo, Output output, CommonToken token) { output.writeInt(token.getType()); output.writeString(token.getText()); } @@ -1101,10 +1100,10 @@ public static TableDesc getTableDesc(Table tbl) { public static TableDesc getTableDesc(String cols, String colTypes) { return (new TableDesc(SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, Utilities.makeProperties( - serdeConstants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode, - serdeConstants.LIST_COLUMNS, cols, - serdeConstants.LIST_COLUMN_TYPES, colTypes, - serdeConstants.SERIALIZATION_LIB,LazySimpleSerDe.class.getName()))); + serdeConstants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode, + serdeConstants.LIST_COLUMNS, cols, + serdeConstants.LIST_COLUMN_TYPES, colTypes, + serdeConstants.SERIALIZATION_LIB,LazySimpleSerDe.class.getName()))); 
} public static PartitionDesc getPartitionDesc(Partition part) throws HiveException { @@ -1522,7 +1521,7 @@ public static void rename(FileSystem fs, Path src, Path dst) throws IOException, * @throws IOException */ public static void renameOrMoveFiles(FileSystem fs, Path src, Path dst) throws IOException, - HiveException { + HiveException { if (!fs.exists(dst)) { if (!fs.rename(src, dst)) { throw new HiveException("Unable to move: " + src + " to: " + dst); @@ -1766,7 +1765,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, */ private static void createEmptyBuckets(Configuration hconf, ArrayList paths, FileSinkDesc conf, Reporter reporter) - throws HiveException, IOException { + throws HiveException, IOException { JobConf jc; if (hconf instanceof JobConf) { @@ -1827,7 +1826,7 @@ public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws I for (int i = 0; i < parts.length; ++i) { assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() - + " is not a direcgtory"; + + " is not a direcgtory"; FileStatus[] items = fs.listStatus(parts[i].getPath()); // remove empty directory since DP insert should not generate empty partitions. @@ -2211,7 +2210,7 @@ public void interrupt() { try { new Path(path).getFileSystem(ctx.getConf()).close(); } catch (IOException ignore) { - LOG.debug(ignore); + LOG.debug(ignore); } } if (executor != null) { @@ -2449,7 +2448,7 @@ private static void getMRTasks(List> tasks, List fullPartSpec = new LinkedHashMap(partSpec); @@ -2557,7 +2556,7 @@ public static void setColumnTypeList(JobConf jobConf, Operator op, boolean exclu public static Path generatePath(Path basePath, String dumpFilePrefix, Byte tag, String bigBucketFileName) { return new Path(basePath, "MapJoin-" + dumpFilePrefix + tag + - "-" + bigBucketFileName + suffix); + "-" + bigBucketFileName + suffix); } public static String generateFileName(Byte tag, String bigBucketFileName) { @@ -2677,7 +2676,7 @@ public T run(PreparedStatement stmt) throws SQLException { try { Thread.sleep(waitTime); } catch (InterruptedException iex) { - } + } } catch (SQLException e) { // throw other types of SQLExceptions (SQLNonTransientException / SQLRecoverableException) throw e; @@ -2784,8 +2783,8 @@ public static PreparedStatement prepareWithRetry(Connection conn, String stmt, */ public static long getRandomWaitTime(int baseWindow, int failures, Random r) { return (long) ( - baseWindow * failures + // grace period for the last round of attempt - baseWindow * (failures + 1) * r.nextDouble()); // expanding time window for each failure + baseWindow * failures + // grace period for the last round of attempt + baseWindow * (failures + 1) * r.nextDouble()); // expanding time window for each failure } public static final char sqlEscapeChar = '\\'; @@ -2863,7 +2862,7 @@ public static String formatMsecToStr(long msec) { * @return the number of reducers. 
*/ public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSummary, - MapWork work, boolean finalMapRed) throws IOException { + MapWork work, boolean finalMapRed) throws IOException { long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); @@ -2876,7 +2875,7 @@ public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSu + maxReducers + " estimated totalInputFileSize=" + totalInputFileSize); } else { LOG.info("BytesPerReducer=" + bytesPerReducer + " maxReducers=" - + maxReducers + " totalInputFileSize=" + totalInputFileSize); + + maxReducers + " totalInputFileSize=" + totalInputFileSize); } // If this map reduce job writes final data to a table and bucketing is being inferred, @@ -3037,7 +3036,7 @@ public static double getHighestSamplePercentage (MapWork work) { if (!HiveConf.getVar(job, ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") && isEmptyPath(job, path, ctx)) { path = createDummyFileForEmptyPartition(path, job, work, - hiveScratchDir, alias, sequenceNumber++); + hiveScratchDir, alias, sequenceNumber++); } pathsToAdd.add(path); @@ -3202,7 +3201,7 @@ public static void setInputPaths(JobConf job, List pathsToAdd) { */ public static void setInputAttributes(Configuration conf, MapWork mWork) { HiveConf.ConfVars var = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ? - HiveConf.ConfVars.HIVETEZINPUTFORMAT : HiveConf.ConfVars.HIVEINPUTFORMAT; + HiveConf.ConfVars.HIVETEZINPUTFORMAT : HiveConf.ConfVars.HIVEINPUTFORMAT; if (mWork.getInputformat() != null) { HiveConf.setVar(conf, var, mWork.getInputformat()); } @@ -3229,7 +3228,7 @@ public static void createTmpDirs(Configuration conf, MapWork mWork) Map> pa = mWork.getPathToAliases(); if (pa != null) { List> ops = - new ArrayList>(); + new ArrayList>(); for (List ls : pa.values()) { for (String a : ls) { ops.add(mWork.getAliasToWork().get(a)); @@ -3254,28 +3253,24 @@ public static void createTmpDirs(Configuration conf, ReduceWork rWork) return; } List> ops - = new LinkedList>(); + = new LinkedList>(); ops.add(rWork.getReducer()); createTmpDirs(conf, ops); } private static void createTmpDirs(Configuration conf, List> ops) throws IOException { - - FsPermission fsPermission = new FsPermission((short)00777); while (!ops.isEmpty()) { Operator op = ops.remove(0); - if (op instanceof FileSinkOperator) { FileSinkDesc fdesc = ((FileSinkOperator) op).getConf(); Path tempDir = fdesc.getDirName(); - if (tempDir != null) { Path tempPath = Utilities.toTempPath(tempDir); - createDirsWithPermission(conf, tempPath, fsPermission); + FileSystem fs = tempPath.getFileSystem(conf); + fs.mkdirs(tempPath); } } - if (op.getChildOperators() != null) { ops.addAll(op.getChildOperators()); } @@ -3296,7 +3291,7 @@ public static boolean isVectorMode(Configuration conf) { return false; } - public static void clearWorkMap() { + public static void clearWorkMap() { gWorkMap.clear(); } @@ -3322,7 +3317,7 @@ public static File createTempDir(String baseDir){ } } throw new IllegalStateException("Failed to create a temp dir under " - + baseDir + " Giving up after " + MAX_ATTEMPS + " attemps"); + + baseDir + " Giving up after " + MAX_ATTEMPS + " attemps"); } @@ -3396,57 +3391,6 @@ public static int getFooterCount(TableDesc table, JobConf job) throws IOExceptio } /** - * @param conf the configuration used to derive the filesystem to create the path - * @param mkdir the path to be created - * @param fsPermission ignored if it is hive 
server session and doAs is enabled - * @return true if successfully created the directory else false - * @throws IOException if hdfs experiences any error conditions - */ - public static boolean createDirsWithPermission(Configuration conf, Path mkdir, - FsPermission fsPermission) throws IOException { - - boolean recursive = false; - if (SessionState.get() != null) { - recursive = SessionState.get().isHiveServerQuery() && - conf.getBoolean(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, - HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.defaultBoolVal); - // we reset the permission in case of hive server and doAs enabled because - // currently scratch directory uses /tmp/hive-hive as the scratch directory. - // However, with doAs enabled, the first user to create this directory would - // own the directory and subsequent users cannot access the scratch directory. - // The right fix is to have scratch dir per user. - fsPermission = new FsPermission((short)00777); - } - - // if we made it so far without exception we are good! - return createDirsWithPermission(conf, mkdir, fsPermission, recursive); - } - - public static boolean createDirsWithPermission(Configuration conf, Path mkdir, - FsPermission fsPermission, boolean recursive) throws IOException { - String origUmask = null; - - if (recursive) { - origUmask = conf.get("fs.permissions.umask-mode"); - // this umask is required because by default the hdfs mask is 022 resulting in - // all parents getting the fsPermission & !(022) permission instead of fsPermission - conf.set("fs.permissions.umask-mode", "000"); - } - - FileSystem fs = mkdir.getFileSystem(conf); - boolean retval = fs.mkdirs(mkdir, fsPermission); - - if (recursive) { - if (origUmask != null) { - conf.set("fs.permissions.umask-mode", origUmask); - } else { - conf.unset("fs.permissions.umask-mode"); - } - } - return retval; - } - - /** * Convert path to qualified path. * * @param conf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index 27e4cd0..cca9369 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -40,12 +39,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -80,7 +76,7 @@ private List localizedResources; private static List openSessions - = Collections.synchronizedList(new LinkedList()); + = Collections.synchronizedList(new LinkedList()); /** * Constructor. 
We do not automatically connect, because we only want to @@ -132,7 +128,7 @@ public void open(HiveConf conf) * @throws TezException */ public void open(HiveConf conf, String[] additionalFiles) - throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException { + throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException { this.conf = conf; UserGroupInformation ugi; @@ -283,19 +279,18 @@ public LocalResource getAppJarLr() { * be used with Tez. Assumes scratchDir exists. */ private Path createTezDir(String sessionId) - throws IOException { - + throws IOException { // tez needs its own scratch dir (per session) - Path tezDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), TEZ_DIR); + Path tezDir = new Path(FileUtils.getHDFSBaseScratchDir(conf), TEZ_DIR); tezDir = new Path(tezDir, sessionId); FileSystem fs = tezDir.getFileSystem(conf); - FsPermission fsPermission = new FsPermission((short)00777); - Utilities.createDirsWithPermission(conf, tezDir, fsPermission, true); + FsPermission fsPermission = new FsPermission( + conf.getVar(HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + fs.mkdirs(tezDir, fsPermission); // Make sure the path is normalized (we expect validation to pass since we just created it). tezDir = DagUtils.validateTargetDir(tezDir, conf).getPath(); // don't keep the directory around on non-clean exit fs.deleteOnExit(tezDir); - return tezDir; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index dab8610..6638bd0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -75,6 +75,9 @@ public class SessionState { private static final Log LOG = LogFactory.getLog(SessionState.class); + // Is the query served from HiveServer2 + private static boolean IS_HIVE_SERVER_QUERY = false; + protected ClassLoader parentLoader; /** @@ -92,11 +95,6 @@ */ protected boolean isVerbose; - /** - * Is the query served from HiveServer2 - */ - private boolean isHiveServerQuery = false; - /* * HiveHistory Object */ @@ -207,8 +205,12 @@ public boolean getIsSilent() { } } - public boolean isHiveServerQuery() { - return this.isHiveServerQuery; + public static boolean isHiveServerQuery() { + return IS_HIVE_SERVER_QUERY; + } + + public static void setIsHiveServerQuery(boolean isHiveServerQuery) { + IS_HIVE_SERVER_QUERY = isHiveServerQuery; } public void setIsSilent(boolean isSilent) { @@ -226,10 +228,6 @@ public void setIsVerbose(boolean isVerbose) { this.isVerbose = isVerbose; } - public void setIsHiveServerQuery(boolean isHiveServerQuery) { - this.isHiveServerQuery = isHiveServerQuery; - } - public SessionState(HiveConf conf) { this(conf, null); } @@ -348,7 +346,7 @@ public static SessionState start(SessionState startSs) { } if (HiveConf.getVar(startSs.getConf(), HiveConf.ConfVars.HIVE_EXECUTION_ENGINE) - .equals("tez") && (startSs.isHiveServerQuery == false)) { + .equals("tez") && (SessionState.isHiveServerQuery() == false)) { try { if (startSs.tezSessionState == null) { startSs.tezSessionState = new TezSessionState(startSs.getSessionId()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/WindowsPathUtil.java b/ql/src/test/org/apache/hadoop/hive/ql/WindowsPathUtil.java index 294a3dd..f75e746 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/WindowsPathUtil.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/WindowsPathUtil.java @@ -18,6 +18,7 @@ 
package org.apache.hadoop.hive.ql; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.util.Shell; @@ -39,7 +40,7 @@ public static void convertPathsFromWindowsToHdfs(HiveConf conf){ String orgTestWarehouseDir = System.getProperty("test.warehouse.dir"); System.setProperty("test.warehouse.dir", getHdfsUriString(orgTestWarehouseDir)); - String orgScratchDir = conf.getVar(HiveConf.ConfVars.SCRATCHDIR); + String orgScratchDir = FileUtils.getHDFSBaseScratchDir(conf); conf.setVar(HiveConf.ConfVars.SCRATCHDIR, getHdfsUriString(orgScratchDir)); } @@ -49,8 +50,8 @@ public static String getHdfsUriString(String uriStr) { // If the URI conversion is from Windows to HDFS then replace the '\' with '/' // and remove the windows single drive letter & colon from absolute path. return uriStr.replace('\\', '/') - .replaceFirst("/[c-zC-Z]:", "/") - .replaceFirst("^[c-zC-Z]:", ""); + .replaceFirst("/[c-zC-Z]:", "/") + .replaceFirst("^[c-zC-Z]:", ""); } return uriStr; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index bf3fd88..390a4a6 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -20,15 +20,12 @@ import static org.apache.hadoop.hive.ql.exec.Utilities.getFileExtension; -import java.io.IOException; import java.sql.Timestamp; import java.util.ArrayList; import java.util.List; import junit.framework.TestCase; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -39,7 +36,6 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFFromUtcTimestamp; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.mapred.JobConf; -import org.junit.Test; public class TestUtilities extends TestCase { @@ -77,9 +73,9 @@ public void testSerializeTimestamp() { List children = new ArrayList(1); children.add(constant); ExprNodeGenericFuncDesc desc = new ExprNodeGenericFuncDesc(TypeInfoFactory.timestampTypeInfo, - new GenericUDFFromUtcTimestamp(), children); + new GenericUDFFromUtcTimestamp(), children); assertEquals(desc.getExprString(), Utilities.deserializeExpression( - Utilities.serializeExpression(desc)).getExprString()); + Utilities.serializeExpression(desc)).getExprString()); } public void testgetDbTableName() throws HiveException{ @@ -109,24 +105,4 @@ public void testgetDbTableName() throws HiveException{ assertEquals("Invalid table name " + tablename, ex.getMessage()); } } - - @Test - public void testFSUmaskReset() throws Exception { - // ensure that FS Umask is not reset (HIVE-7001) - checkFSUMaskReset(true); - checkFSUMaskReset(false); - } - - private void checkFSUMaskReset(boolean recursiveArg) throws IllegalArgumentException, IOException { - final String FS_MASK_PARAM = "fs.permissions.umask-mode"; - final String FS_MASK_VAL = "055"; - HiveConf conf = new HiveConf(); - String dir = System.getProperty("test.tmp.dir") + "/testUtilitiesUMaskReset"; - conf.set(FS_MASK_PARAM, FS_MASK_VAL); - Utilities.createDirsWithPermission(conf, new Path(dir), new FsPermission((short) 00777), - recursiveArg); - assertEquals(conf.get(FS_MASK_PARAM), FS_MASK_VAL); - } - - } diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java 
b/service/src/java/org/apache/hive/service/cli/CLIService.java index d01bce9..9f5e142 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIService.java +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java @@ -30,9 +30,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; @@ -121,16 +118,6 @@ public UserGroupInformation getHttpUGI() { @Override public synchronized void start() { super.start(); - - try { - // make sure that the base scratch directories exists and writable - setupStagingDir(hiveConf.getVar(HiveConf.ConfVars.SCRATCHDIR), false); - setupStagingDir(hiveConf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR), true); - setupStagingDir(hiveConf.getVar(HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR), true); - } catch (IOException eIO) { - throw new ServiceException("Error setting stage directories", eIO); - } - try { // Initialize and test a connection to the metastore metastoreClient = new HiveMetaStoreClient(hiveConf); @@ -441,22 +428,6 @@ public synchronized String getDelegationTokenFromMetaStore(String owner) } } - // create the give Path if doesn't exists and make it writable - private void setupStagingDir(String dirPath, boolean isLocal) throws IOException { - Path scratchDir = new Path(dirPath); - FileSystem fs; - if (isLocal) { - fs = FileSystem.getLocal(hiveConf); - } else { - fs = scratchDir.getFileSystem(hiveConf); - } - if (!fs.exists(scratchDir)) { - fs.mkdirs(scratchDir); - } - FsPermission fsPermission = new FsPermission((short)0777); - fs.setPermission(scratchDir, fsPermission); - } - @Override public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner, String renewer) throws HiveSQLException { diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index a9d5902..34d717c 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -102,7 +102,6 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo FetchFormatter.ThriftFormatter.class.getName()); hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue()); sessionState = new SessionState(hiveConf, username); - sessionState.setIsHiveServerQuery(true); SessionState.start(sessionState); } diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java index 05e742c..19cede2 100644 --- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java +++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java @@ -18,6 +18,8 @@ package org.apache.hive.service.cli.session; +import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -28,12 +30,17 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; 
+import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.hooks.HookUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.service.CompositeService; +import org.apache.hive.service.ServiceException; import org.apache.hive.service.auth.TSetIpAddressProcessor; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.SessionHandle; @@ -91,9 +98,34 @@ private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveExc @Override public synchronized void start() { + String dirPath; + try { + // Create the base HDFS scratch directory + // If doAs is true, we create session specific hdfs scratch + // directories while opening the session & use those. + dirPath = FileUtils.getHDFSBaseScratchDir(hiveConf); + setupStagingDir(dirPath, false); + LOG.info("Created HDFS scratch directory: " + dirPath); + } catch (IOException e) { + throw new ServiceException("Error setting up the HDFS scratch directory", e); + } + + try { + // Create the base local scratch directory + dirPath = hiveConf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR); + setupStagingDir(dirPath, true); + LOG.info("Created local scratch directory: " + dirPath); + } catch (IOException e) { + throw new ServiceException("Error setting up the local scratch directory", e); + } + // Used for picking some of the code path specific to HiveServer2 + SessionState.setIsHiveServerQuery(true); super.start(); } + /** + * Stop the session manager and kill the background thread pool. + */ @Override public synchronized void stop() { super.stop(); @@ -103,8 +135,8 @@ public synchronized void stop() { try { backgroundOperationPool.awaitTermination(timeout, TimeUnit.SECONDS); } catch (InterruptedException e) { - LOG.warn("HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT = " + timeout + - " seconds has been exceeded. RUNNING background operations will be shut down", e); + LOG.warn(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT.varname + "= " + timeout + + " seconds has been exceeded. 
RUNNING background operations will be shut down", e); } } } @@ -114,15 +146,53 @@ public SessionHandle openSession(TProtocolVersion protocol, String username, Str return openSession(protocol, username, password, sessionConf, false, null); } + /** + * Open a new user session, and save the session handle + * @param protocol + * @param username + * @param password + * @param sessionConf + * @param withImpersonation + * @param delegationToken + * @return + * @throws HiveSQLException + */ public SessionHandle openSession(TProtocolVersion protocol, String username, String password, Map sessionConf, boolean withImpersonation, String delegationToken) throws HiveSQLException { HiveSession session; + // If doAs is enabled, we wrap each call in HiveSessionImpl in a doAs + // via a proxy HiveSessionProxy object if (withImpersonation) { - HiveSessionImplwithUGI hiveSessionUgi = new HiveSessionImplwithUGI(protocol, username, password, - hiveConf, sessionConf, TSetIpAddressProcessor.getUserIpAddress(), delegationToken); - session = HiveSessionProxy.getProxy(hiveSessionUgi, hiveSessionUgi.getSessionUgi()); - hiveSessionUgi.setProxySession(session); + final HiveSessionImplwithUGI hiveSessionWithUgi = new HiveSessionImplwithUGI(protocol, + username, password, hiveConf, sessionConf, + TSetIpAddressProcessor.getUserIpAddress(), delegationToken); + session = HiveSessionProxy.getProxy(hiveSessionWithUgi, hiveSessionWithUgi.getSessionUgi()); + hiveSessionWithUgi.setProxySession(session); + // Create the hdfs scratch dir as the connecting user, if it doesn't already exist + try { + hiveSessionWithUgi.getSessionUgi().doAs( + new PrivilegedExceptionAction() { + @Override + public Void run() throws HiveSQLException { + // Get hdfs scratch dir path this session + try { + String dirPath = FileUtils.getHDFSBaseScratchDir(hiveConf); + setupStagingDir(dirPath, false); + LOG.info("Created HDFS scratch directory: " + dirPath); + } catch (IOException e) { + throw new HiveSQLException("Error setting up hdfs scratch directory " + + "for the session", e); + } + return null; + } + } + ); + } catch (Exception e) { + LOG.error("Error setting up hdfs scratch directory for the user : " + + hiveSessionWithUgi.getSessionUgi().getShortUserName(), e); + throw new HiveSQLException(e); + } } else { session = new HiveSessionImpl(protocol, username, password, hiveConf, sessionConf, TSetIpAddressProcessor.getUserIpAddress()); @@ -131,7 +201,6 @@ public SessionHandle openSession(TProtocolVersion protocol, String username, Str session.setOperationManager(operationManager); session.open(); handleToSession.put(session.getSessionHandle(), session); - try { executeSessionHooks(session); } catch (Exception e) { @@ -140,6 +209,22 @@ public SessionHandle openSession(TProtocolVersion protocol, String username, Str return session.getSessionHandle(); } + // Create the given Path if doesn't exist + private void setupStagingDir(String dirPath, boolean isLocal) throws IOException { + Path scratchDir = new Path(dirPath); + FileSystem fs; + if (isLocal) { + fs = FileSystem.getLocal(hiveConf); + } else { + fs = scratchDir.getFileSystem(hiveConf); + } + if (!fs.exists(scratchDir)) { + FsPermission fsPermission = new FsPermission( + hiveConf.getVar(HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + fs.mkdirs(scratchDir, fsPermission); + } + } + public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { HiveSession session = handleToSession.remove(sessionHandle); if (session == null) { @@ -227,8 +312,13 @@ private void 
executeSessionHooks(HiveSession session) throws Exception { } } - public Future submitBackgroundOperation(Runnable r) { - return backgroundOperationPool.submit(r); + /** + * Submits an operation to the background thread pool for asynchronous execution. + * @param runnable the operation to execute + * @return a Future that can be used to track or cancel the submitted operation + */ + public Future submitBackgroundOperation(Runnable runnable) { + return backgroundOperationPool.submit(runnable); + } }
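As a rough, self-contained illustration of the behavior this patch introduces — a per-user HDFS base scratch directory built from hive.exec.scratchdir.prefix plus the effective user, created up front with hive.scratch.dir.permission — here is a minimal sketch. It is not Hive code: it substitutes UserGroupInformation.getCurrentUser() for Hive's ShimLoader.getHadoopShims().getUGIForConf(conf), hard-codes the default prefix ("/tmp/hive-") and permission ("700") instead of reading them from a HiveConf, and the class name ScratchDirSketch is hypothetical. Under HiveServer2 with doAs enabled, the equivalent resolution runs inside the session UGI's doAs block, so the directory ends up owned by the connecting user.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical standalone sketch; mirrors the defaults of hive.exec.scratchdir.prefix
// and hive.scratch.dir.permission rather than reading them from a HiveConf.
public class ScratchDirSketch {

  private static final String SCRATCH_DIR_PREFIX = "/tmp/hive-";
  private static final String SCRATCH_DIR_PERMISSION = "700";

  // Resolve the per-user base scratch dir: prefix + short name of the effective user.
  // When HiveServer2 runs with doAs enabled, this executes inside ugi.doAs(...),
  // so getCurrentUser() is the connecting user; otherwise it is the service user.
  public static Path perUserScratchDir() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return new Path(SCRATCH_DIR_PREFIX + ugi.getShortUserName());
  }

  // Create the directory if missing, applying the permission at creation time,
  // similar in spirit to SessionManager.setupStagingDir in this patch.
  public static Path ensureScratchDir(Configuration conf) throws IOException {
    Path dir = perUserScratchDir();
    FileSystem fs = dir.getFileSystem(conf);
    if (!fs.exists(dir)) {
      fs.mkdirs(dir, new FsPermission(SCRATCH_DIR_PERMISSION));
    }
    return dir;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    System.out.println("Per-user scratch dir: " + ensureScratchDir(conf));
  }
}

Using the two-argument mkdirs keeps creation and permission in one call, matching what the patch does in SessionManager.setupStagingDir and TezSessionState.createTezDir; Context.getScratchDir instead keeps the two-step mkdirs() followed by setPermission().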