From 7dcd2eb17fa74707a18d977718e9ef4ed0010230 Mon Sep 17 00:00:00 2001
From: Ashutosh Chauhan
Date: Tue, 2 Feb 2016 18:03:44 -0800
Subject: [PATCH] HIVE-12988 : Improve dynamic partition loading IV

---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |   4 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   | 264 +++++++++++++--------
 .../java/org/apache/hadoop/fs/ProxyFileSystem.java |   5 +-
 3 files changed, 169 insertions(+), 104 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 2723dad..66915ab 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2276,6 +2276,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist",
         "set,reset,dfs,add,list,delete,reload,compile",
         "Comma separated list of non-SQL Hive commands users are authorized to execute"),
+    HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 25, new SizeValidator(1L, true, 1024L, true),
+        "Number of threads used to move files in a move task"),
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
     // outputs are ready
     HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
@@ -2716,7 +2718,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
         "Name of the SASL mechanism to use for authentication."),
     SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
-        "The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
+        "The server address of HiveServer2 host to be used for communication between Hive client and remote Spark driver. " +
         "Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host."
+ "This is only necessary if the host has mutiple network addresses and if a different network address other than " + "hive.server2.thrift.bind.host is to be used."), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index ad17096..5108a48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -32,19 +32,24 @@ import java.io.PrintStream; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import com.google.common.collect.ImmutableMap; + import javax.jdo.JDODataStoreException; import org.apache.hadoop.conf.Configuration; @@ -131,6 +136,12 @@ import org.slf4j.LoggerFactory; import com.google.common.collect.Sets; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * This class has functions that implement meta data/DDL operations using calls @@ -1493,7 +1504,7 @@ public Partition loadPartition(Path loadPath, Table tbl, isSrcLocal); } else { if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) { - newFiles = new ArrayList<>(); + newFiles = Collections.synchronizedList(new ArrayList()); } FileSystem fs = tbl.getDataLocation().getFileSystem(conf); @@ -1740,9 +1751,13 @@ private void constructOneLBLocationMap(FileStatus fSta, public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid) throws HiveException { - List newFiles = new ArrayList(); + + List newFiles = null; Table tbl = getTable(tableName); HiveConf sessionConf = SessionState.getSessionConf(); + if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) { + newFiles = Collections.synchronizedList(new ArrayList()); + } if (replace) { Path tableDest = tbl.getPath(); replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal); @@ -2568,75 +2583,102 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, } } - // for each file or directory in 'srcs', make mapping for every file in src to safe name in dest - private static List> checkPaths(HiveConf conf, FileSystem fs, - FileStatus[] srcs, FileSystem srcFs, Path destf, boolean replace) - throws HiveException { + private static void copyFiles(final HiveConf conf, final FileSystem destFs, + FileStatus[] srcs, final FileSystem srcFs, final Path destf, final boolean isSrcLocal, final List newFiles) + throws HiveException { - List> result = new ArrayList>(); - try { - FileStatus destStatus = !replace ? 
FileUtils.getFileStatusOrNull(fs, destf) : null; - if (destStatus != null && !destStatus.isDir()) { - throw new HiveException("checkPaths: destination " + destf - + " should be a directory"); + final HadoopShims.HdfsFileStatus fullDestStatus; + try { + fullDestStatus = ShimLoader.getHadoopShims().getFullFileStatus(conf, destFs, destf); + } catch (IOException e1) { + throw new HiveException(e1); + } + + if (!fullDestStatus.getFileStatus().isDirectory()) { + throw new HiveException(destf + " is not a directory."); } + final boolean inheritPerms = HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); + final ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool( + conf.getIntVar(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT), + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MoveDir-Thread-%d").build())); + + final List exceptions = Collections.synchronizedList(new LinkedList()); for (FileStatus src : srcs) { - FileStatus[] items; - if (src.isDir()) { - items = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); - Arrays.sort(items); + FileStatus[] files; + if (src.isDirectory()) { + try { + files = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); + } catch (IOException e) { + pool.shutdownNow(); + exceptions.add(e); + break; + } } else { - items = new FileStatus[] {src}; + files = new FileStatus[] {src}; } - List srcToDest = new ArrayList(); - for (FileStatus item : items) { - - Path itemSource = item.getPath(); - - if (Utilities.isTempPath(item)) { - // This check is redundant because temp files are removed by - // execution layer before - // calling loadTable/Partition. But leaving it in just in case. - srcFs.delete(itemSource, true); - continue; + for (FileStatus srcFile : files) { + + final Path srcP = srcFile.getPath(); + final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs); + // Strip off the file type, if any so we don't make: + // 000000_0.gz -> 000000_0.gz_copy_1 + final String name; + final String filetype; + String itemName = srcP.getName(); + int index = itemName.lastIndexOf('.'); + if (index >= 0) { + filetype = itemName.substring(index); + name = itemName.substring(0, index); + } else { + name = itemName; + filetype = ""; } + final ListenableFuture> future = pool.submit(new Callable>() { + @Override + public ObjectPair call() throws Exception { + Path destPath = new Path(destf, srcP.getName()); + if (!needToCopy && !isSrcLocal) { + for (int counter = 1; !destFs.rename(srcP,destPath); counter++) { + destPath = new Path(destf, name + ("_copy_" + counter) + filetype); + } + } else { + destPath = mvFile(conf, srcP, destPath, isSrcLocal, srcFs, destFs, filetype); + } - Path itemDest = new Path(destf, itemSource.getName()); - - if (!replace) { - // Strip off the file type, if any so we don't make: - // 000000_0.gz -> 000000_0.gz_copy_1 - String name = itemSource.getName(); - String filetype; - int index = name.lastIndexOf('.'); - if (index >= 0) { - filetype = name.substring(index); - name = name.substring(0, index); - } else { - filetype = ""; + if (inheritPerms) { + ShimLoader.getHadoopShims().setFullFileStatus(conf, fullDestStatus, destFs, destf); + } + if (null != newFiles) { + newFiles.add(destPath); + } + return ObjectPair.create(srcP, destPath); } - // It's possible that the file we're copying may have the same - // relative name as an existing file in the "destf" directory. 
-          // So let's make a quick check to see if we can rename any
-          // potential offenders so as to allow them to move into the
-          // "destf" directory. The scheme is dead simple: simply tack
-          // on "_copy_N" where N starts at 1 and works its way up until
-          // we find a free space.
-
-          // removed source file staging.. it's more confusing when failed.
-          for (int counter = 1; fs.exists(itemDest) || destExists(result, itemDest); counter++) {
-            itemDest = new Path(destf, name + ("_copy_" + counter) + filetype);
+        });
+        Futures.addCallback(future, new FutureCallback<ObjectPair<Path, Path>>() {
+          @Override
+          public void onSuccess(ObjectPair<Path, Path> pair) {
+            LOG.debug("Moved src: {} to dest: {}", pair.getFirst().toString(), pair.getSecond().toString());
           }
-        }
-        srcToDest.add(new Path[]{itemSource, itemDest});
+          @Override
+          public void onFailure(Throwable t) {
+            LOG.error("Failed to move: {}", srcP, t.getCause());
+            exceptions.add(t.getCause());
+            pool.shutdownNow();
+          }
+        });
       }
-      result.add(srcToDest);
     }
-    } catch (IOException e) {
-      throw new HiveException("checkPaths: filesystem error in check phase. " + e.getMessage(), e);
-    }
-    return result;
+    pool.shutdown();
+    try {
+      pool.awaitTermination(1, TimeUnit.HOURS);
+    } catch (InterruptedException e) {
+      throw new HiveException(e);
+    }
+    if (!exceptions.isEmpty()) {
+      throw new HiveException(exceptions.get(0));
+    }
   }
 
   private static boolean destExists(List<List<Path[]>> result, Path proposed) {
@@ -2693,14 +2735,34 @@ private static String getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileS
     return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path).toString();
   }
 
+  private static Path mvFile(HiveConf conf, Path srcf, Path destf, boolean isSrcLocal,
+      FileSystem srcFs, FileSystem destFs, String filetype) throws IOException {
+
+    // Pick name_copy_N.filetype as a sibling of destf rather than a child of it;
+    // srcf's name still carries the extension, so strip it before appending the suffix.
+    String name = srcf.getName();
+    if (!filetype.isEmpty() && name.endsWith(filetype)) {
+      name = name.substring(0, name.length() - filetype.length());
+    }
+    for (int counter = 1; destFs.exists(destf); counter++) {
+      destf = new Path(destf.getParent(), name + ("_copy_" + counter) + filetype);
+    }
+    if (isSrcLocal) {
+      // For local src file, copy to hdfs
+      destFs.copyFromLocalFile(srcf, destf);
+    } else {
+      // copy if across file systems or encryption zones.
+      LOG.info("Copying source " + srcf + " to " + destf
+          + " because source and destination are on different file systems or in different encryption zones.");
+      FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf,
+          true,  // delete source
+          false, // overwrite destination
+          conf);
+    }
+    return destf;
+  }
+
   //it is assumed that parent directory of the destf should already exist when this
   //method is called. when the replace value is true, this method works a little different
   //from mv command if the destf is a directory, it replaces the destf instead of moving under
   //the destf. in this case, the replaced destf still preserves the original destf's permission
-  public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
+  public static boolean moveFile(HiveConf conf, Path srcf, final Path destf,
       boolean replace, boolean isSrcLocal) throws HiveException {
     boolean success = false;
-    FileSystem srcFs, destFs;
+    final FileSystem srcFs, destFs;
     try {
       destFs = destf.getFileSystem(conf);
     } catch (IOException e) {
@@ -2765,30 +2827,38 @@ public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
         if (srcs.length == 0) {
           success = true; // Nothing to move.
         }
+        final ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(
+            conf.getIntVar(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT),
+            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MoveDir-Thread-%d").build()));
         /* Move files one by one because source is a subdirectory of destination */
-        for (FileStatus status : srcs) {
-          Path destFile;
-
-          /* Append the source filename to the destination directory */
-          if (destFs.isDirectory(destf)) {
-            destFile = new Path(destf, status.getPath().getName());
-          } else {
-            destFile = destf;
-          }
-
-          // Destination should be replaced, so we delete it first
-          if (destFs.exists(destFile)) {
-            if (!destFs.delete(destFile, true)) {
-              throw new HiveException(String.format("File to replace could not be deleted: %s", destFile));
+        for (final FileStatus status : srcs) {
+          ListenableFuture<Boolean> renameStatus = pool.submit(new Callable<Boolean>() {
+            @Override
+            public Boolean call() throws Exception {
+              return destFs.rename(status.getPath(), destf);
             }
-          }
-
-          if (!(destFs.rename(status.getPath(), destFile))) {
-            throw new HiveException("Unable to move source " + status.getPath() + " to destination " + destf);
-          }
+          });
+          Futures.addCallback(renameStatus, new FutureCallback<Boolean>() {
+            @Override
+            public void onSuccess(Boolean result) {
+              if (!result) {
+                LOG.debug("Failed to rename {} to {}.", status.getPath(), destf);
+                pool.shutdownNow();
+              }
+            }
+            @Override
+            public void onFailure(Throwable t) {
+              LOG.debug("Failed to rename " + status.getPath() + " to " + destf, t.getCause());
+              pool.shutdownNow();
+            }
+          });
+        }
+        pool.shutdown();
+        try {
+          success = pool.awaitTermination(1, TimeUnit.HOURS);
+        } catch (InterruptedException e) {
+          throw new HiveException(e);
         }
-
-        success = true;
       } else {
         success = destFs.rename(srcf, destf);
       }
@@ -2814,8 +2884,9 @@ public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
 
   /**
    * If moving across different FileSystems or different encryption zones, need to do a File copy instead of rename.
    * TODO- consider if need to do this for different file authority.
+   * @throws HiveException
    */
-  static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, FileSystem destFs) throws HiveException, IOException {
+  static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, FileSystem destFs) throws HiveException {
     //Check if different FileSystems
     if (!srcFs.getClass().equals(destFs.getClass())) {
       return true;
     }
@@ -2823,8 +2894,12 @@ static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, Fil
 
     //Check if different encryption zones
     HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
-    return hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf))
-        && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf);
+    try {
+      return hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf))
+          && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf);
+    } catch (IOException e) {
+      throw new HiveException(e);
+    }
   }
 
   /**
@@ -2875,22 +2950,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
     if (isAcid) {
       moveAcidFiles(srcFs, srcs, destf, newFiles);
     } else {
-      // check that source and target paths exist
-      List<List<Path[]>> result = checkPaths(conf, fs, srcs, srcFs, destf, false);
-      // move it, move it
-      try {
-        for (List<Path[]> sdpairs : result) {
-          for (Path[] sdpair : sdpairs) {
-            if (!moveFile(conf, sdpair[0], sdpair[1], false, isSrcLocal)) {
-              throw new IOException("Cannot move " + sdpair[0] + " to "
-                  + sdpair[1]);
-            }
-            if (newFiles != null) newFiles.add(sdpair[1]);
-          }
-        }
-      } catch (IOException e) {
-        throw new HiveException("copyFiles: error while moving files!!! " + e.getMessage(), e);
-      }
+      copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, newFiles);
     }
   }
 
diff --git a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
index cb1e2b7..2c37a51 100644
--- a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
+++ b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
@@ -82,6 +82,7 @@ public ProxyFileSystem(FileSystem fs) {
    * @return
    * @throws IOException
    */
+  @Override
   public Path resolvePath(final Path p) throws IOException {
     // Return the fully-qualified path of path f resolving the path
     // through any symlinks or mount point
@@ -174,7 +175,9 @@ public boolean setReplication(Path src, short replication) throws IOException {
 
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
-    return super.rename(swizzleParamPath(src), swizzleParamPath(dst));
+    Path dest = swizzleParamPath(dst);
+    // Make sure we return false for an existing destination file, per the FileSystem rename contract
+    return super.isFile(dest) ? false : super.rename(swizzleParamPath(src), dest);
   }
 
   @Override
-- 
1.7.12.4 (Apple Git-37)
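
Editor's note (not part of the patch): copyFiles() and moveFile() both lean on the
same Guava pattern -- wrap a fixed-size pool with MoreExecutors.listeningDecorator(),
submit one Callable per file, attach a FutureCallback that records failures and calls
shutdownNow(), then shutdown()/awaitTermination() and rethrow the first recorded
failure. The sketch below shows that pattern in isolation with a stand-in task in
place of FileSystem.rename(); the class name MoveTaskDemo, THREAD_COUNT, and the
simulated failure are hypothetical, while the Guava calls are the ones the patch
imports.

    import java.util.Collections;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class MoveTaskDemo {
      private static final int THREAD_COUNT = 25; // default of hive.mv.files.thread

      public static void main(String[] args) throws Exception {
        // Decorate a plain fixed pool so that submit() returns a ListenableFuture
        final ListeningExecutorService pool = MoreExecutors.listeningDecorator(
            Executors.newFixedThreadPool(THREAD_COUNT,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MoveDir-Thread-%d").build()));
        // Collect failures from worker threads, as copyFiles() does
        final List<Throwable> exceptions = Collections.synchronizedList(new LinkedList<Throwable>());

        for (int i = 0; i < 20; i++) {
          final int id = i;
          ListenableFuture<Boolean> future = pool.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              if (id == 13) {
                throw new java.io.IOException("simulated move failure"); // stand-in for a failed move
              }
              return true; // stand-in for destFs.rename(src, dest)
            }
          });
          Futures.addCallback(future, new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean renamed) {
              System.out.println("move " + id + (renamed ? " done" : " reported failure"));
            }
            @Override
            public void onFailure(Throwable t) {
              exceptions.add(t);
              pool.shutdownNow(); // first failure cancels the remaining moves
            }
          });
        }
        // Fail the whole "move task" if any single move failed
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.HOURS);
        if (!exceptions.isEmpty()) {
          throw new RuntimeException(exceptions.get(0));
        }
      }
    }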
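
Tuning note: the pool size in both methods comes from the new hive.mv.files.thread
setting (default 25, constrained to the range 1-1024 by the SizeValidator above), so
a session can dial the fan-out up or down with Hive's usual SET command, e.g.:

    SET hive.mv.files.thread=8;

Same-filesystem renames are cheap metadata operations, which is why a fairly wide
default is safe; when needToCopy() reports a cross-filesystem or cross-encryption-zone
move, each worker falls back to mvFile()'s byte copy, and the thread count then bounds
the number of concurrent copy streams.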