From 8a9a7bd813091673fefb64f81a89e88a5cd8ead2 Mon Sep 17 00:00:00 2001
From: Ashutosh Chauhan
Date: Tue, 2 Feb 2016 18:03:44 -0800
Subject: [PATCH] HIVE-12988 : Improve dynamic partition loading IV

---
 .../org/apache/hadoop/hive/ql/metadata/Hive.java | 204 +++++++++++++--------
 1 file changed, 124 insertions(+), 80 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 75d2519..65a08b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -33,18 +33,24 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -119,6 +125,7 @@
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -128,6 +135,11 @@
 import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 
 /**
  * This class has functions that implement meta data/DDL operations using calls
@@ -1483,7 +1495,7 @@ public Partition loadPartition(Path loadPath, Table tbl,
           isSrcLocal);
     } else {
       if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) {
-        newFiles = new ArrayList<>();
+        newFiles = Collections.synchronizedList(new ArrayList<Path>());
       }
 
       FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
@@ -1730,7 +1742,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal,
       boolean isSkewedStoreAsSubdir, boolean isAcid) throws HiveException {
-    List<Path> newFiles = new ArrayList<Path>();
+    List<Path> newFiles = Collections.synchronizedList(new ArrayList<Path>());
     Table tbl = getTable(tableName);
     HiveConf sessionConf = SessionState.getSessionConf();
     if (replace) {
@@ -2560,75 +2572,97 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType,
     }
   }
 
-  // for each file or directory in 'srcs', make mapping for every file in src to safe name in dest
-  private static List<List<Path[]>> checkPaths(HiveConf conf, FileSystem fs,
-      FileStatus[] srcs, FileSystem srcFs, Path destf, boolean replace)
-      throws HiveException {
+  private static void copyFiles(final HiveConf conf, final FileSystem fs,
+      FileStatus[] srcs, final FileSystem srcFs, final Path destf, final boolean isSrcLocal,
+      final List<Path> newFiles)
+      throws HiveException {
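+    // Stat the destination once up front: this both validates that destf exists
+    // and captures the permissions that moved files may later need to inherit.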
-    List<List<Path[]>> result = new ArrayList<List<Path[]>>();
-    try {
-      FileStatus destStatus = !replace ? FileUtils.getFileStatusOrNull(fs, destf) : null;
-      if (destStatus != null && !destStatus.isDir()) {
-        throw new HiveException("checkPaths: destination " + destf
-            + " should be a directory");
+    final HadoopShims.HdfsFileStatus fullDestStatus;
+    try {
+      fullDestStatus = ShimLoader.getHadoopShims().getFullFileStatus(conf, fs, destf);
+    } catch (IOException e1) {
+      throw new HiveException(e1);
+    }
+
+    if (!fullDestStatus.getFileStatus().isDirectory()) {
+      throw new HiveException(destf + " is not a directory.");
     }
+    final boolean inheritPerms = HiveConf.getBoolVar(conf,
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+    final ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
+
+    final List<Throwable> exceptions = Collections.synchronizedList(new LinkedList<Throwable>());
     for (FileStatus src : srcs) {
-      FileStatus[] items;
-      if (src.isDir()) {
-        items = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
-        Arrays.sort(items);
+      FileStatus[] files;
+      if (src.isDirectory()) {
+        try {
+          files = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
+        } catch (IOException e) {
+          pool.shutdownNow();
+          exceptions.add(e);
+          break;
+        }
       } else {
-        items = new FileStatus[] {src};
+        files = new FileStatus[] {src};
       }
-      List<Path[]> srcToDest = new ArrayList<Path[]>();
-      for (FileStatus item : items) {
-
-        Path itemSource = item.getPath();
-
-        if (Utilities.isTempPath(item)) {
-          // This check is redundant because temp files are removed by
-          // execution layer before
-          // calling loadTable/Partition. But leaving it in just in case.
-          srcFs.delete(itemSource, true);
-          continue;
+      for (FileStatus srcFile : files) {
+
+        final Path srcP = srcFile.getPath();
+        // Strip off the file type, if any so we don't make:
+        // 000000_0.gz -> 000000_0.gz_copy_1
+        final String name;
+        final String filetype;
+        String itemName = srcP.getName();
+        int index = itemName.lastIndexOf('.');
+        if (index >= 0) {
+          filetype = itemName.substring(index);
+          name = itemName.substring(0, index);
+        } else {
+          name = itemName;
+          filetype = "";
         }
-
-        Path itemDest = new Path(destf, itemSource.getName());
-
-        if (!replace) {
-          // Strip off the file type, if any so we don't make:
-          // 000000_0.gz -> 000000_0.gz_copy_1
-          String name = itemSource.getName();
-          String filetype;
-          int index = name.lastIndexOf('.');
-          if (index >= 0) {
-            filetype = name.substring(index);
-            name = name.substring(0, index);
-          } else {
-            filetype = "";
+        final ListenableFuture<Path> future = pool.submit(new Callable<Path>() {
+          @Override
+          public Path call() throws Exception {
+            Path itemDest = new Path(destf, srcP.getName());
+            for (int counter = 1; fs.exists(itemDest); counter++) {
+              itemDest = new Path(destf, name + ("_copy_" + counter) + filetype);
+            }
+            if (null != newFiles) {
+              newFiles.add(itemDest);
+            }
+            return itemDest;
           }
-          // It's possible that the file we're copying may have the same
-          // relative name as an existing file in the "destf" directory.
-          // So let's make a quick check to see if we can rename any
-          // potential offenders so as to allow them to move into the
-          // "destf" directory. The scheme is dead simple: simply tack
-          // on "_copy_N" where N starts at 1 and works its way up until
-          // we find a free space.
-
-          // removed source file staging.. it's more confusing when failed.
-          for (int counter = 1; fs.exists(itemDest) || destExists(result, itemDest); counter++) {
-            itemDest = new Path(destf, name + ("_copy_" + counter) + filetype);
+        });
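+        // Chain the move onto the naming future: onSuccess performs the actual
+        // copy/rename, while any failure is recorded and shuts the pool down.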
+        Futures.addCallback(future, new FutureCallback<Path>() {
+          @Override
+          public void onSuccess(Path destP) {
+            try {
+              mvFile(conf, srcP, destP, isSrcLocal, srcFs, fs, fullDestStatus, inheritPerms);
+            } catch (Exception e) {
+              LOG.error("Failed to move file " + srcP + " to " + destP, e);
+              exceptions.add(e);
+              pool.shutdownNow();
+            }
+          }
-        }
-        srcToDest.add(new Path[]{itemSource, itemDest});
+          @Override
+          public void onFailure(Throwable t) {
+            LOG.error("Failed to find unique name for file " + srcP + " in " + destf, t.getCause());
+            exceptions.add(t.getCause());
+            pool.shutdownNow();
+          }
+        });
       }
-      result.add(srcToDest);
     }
-    } catch (IOException e) {
-      throw new HiveException("checkPaths: filesystem error in check phase. " + e.getMessage(), e);
-    }
-    return result;
+    pool.shutdown();
+    try {
+      pool.awaitTermination(3, TimeUnit.HOURS);
+    } catch (InterruptedException e) {
+      throw new HiveException(e);
+    }
+    if (!exceptions.isEmpty()) {
+      throw new HiveException(exceptions.get(0));
+    }
   }
 
   private static boolean destExists(List<List<Path[]>> result, Path proposed) {
@@ -2685,6 +2719,30 @@ private static String getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileS
     return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path).toString();
   }
 
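+  /**
+   * Moves one file into destf: a local source is copied into the destination
+   * file system; a cross-filesystem or cross-encryption-zone source is copied
+   * rather than renamed; anything else is a plain rename. Optionally makes the
+   * moved file inherit the destination directory's permissions.
+   */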
+  private static void mvFile(HiveConf conf, Path srcf, Path destf, boolean isSrcLocal,
+      FileSystem srcFs, FileSystem destFs, HdfsFileStatus destStatus,
+      boolean inheritPerms) throws IOException {
+    if (isSrcLocal) {
+      // For local src file, copy to hdfs
+      destFs.copyFromLocalFile(srcf, destf);
+    } else {
+      if (needToCopy(srcf, destf, srcFs, destFs)) {
+        // Copy if across file systems or encryption zones.
+        LOG.info("Copying source " + srcf + " to " + destf
+            + " because the source and destination file systems or encryption zones differ.");
+        FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf,
+            true,  // delete source
+            false, // overwrite destination
+            conf);
+      } else {
+        if (!destFs.rename(srcf, destf)) {
+          throw new IOException("Cannot rename " + srcf + " to " + destf);
+        }
+      }
+    }
+    LOG.info("Renaming src: {}, dest: {}", srcf, destf);
+    if (inheritPerms) {
+      ShimLoader.getHadoopShims().setFullFileStatus(conf, destStatus, destFs, destf);
+    }
+  }
+
   //it is assumed that parent directory of the destf should already exist when this
   //method is called. when the replace value is true, this method works a little different
   //from mv command if the destf is a directory, it replaces the destf instead of moving under
@@ -2807,16 +2865,16 @@ public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
    * If moving across different FileSystems or differnent encryption zone, need to do a File copy instead of rename.
    * TODO- consider if need to do this for different file authority.
    */
-  static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, FileSystem destFs) throws HiveException, IOException {
+  static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, FileSystem destFs) throws IOException {
     //Check if different FileSystems
     if (!srcFs.getClass().equals(destFs.getClass())) {
       return true;
     }
 
     //Check if different encryption zones
     HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
     return hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf))
         && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf);
   }
 
   /**
@@ -2867,22 +2926,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
     if (isAcid) {
       moveAcidFiles(srcFs, srcs, destf, newFiles);
     } else {
-      // check that source and target paths exist
-      List<List<Path[]>> result = checkPaths(conf, fs, srcs, srcFs, destf, false);
-      // move it, move it
-      try {
-        for (List<Path[]> sdpairs : result) {
-          for (Path[] sdpair : sdpairs) {
-            if (!moveFile(conf, sdpair[0], sdpair[1], false, isSrcLocal)) {
-              throw new IOException("Cannot move " + sdpair[0] + " to "
-                  + sdpair[1]);
-            }
-            if (newFiles != null) newFiles.add(sdpair[1]);
-          }
-        }
-      } catch (IOException e) {
-        throw new HiveException("copyFiles: error while moving files!!! " + e.getMessage(), e);
-      }
+      copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, newFiles);
     }
   }
-- 
1.7.12.4 (Apple Git-37)