diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index e6b47de877..3550747c16 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -768,7 +768,7 @@ private boolean needConversion(TableDesc tableDesc, List<PartitionDesc> partDesc
   private FileStatus[] listStatusUnderPath(FileSystem fs, Path p) throws IOException {
     boolean recursive = job.getBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, false);
     // If this is in acid format always read it recursively regardless of what the jobconf says.
-    if (!recursive && !AcidUtils.isAcid(p, job)) {
+    if (!recursive && !AcidUtils.isAcid(fs, p, job)) {
       return fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER);
     }
     List<FileStatus> results = new ArrayList<FileStatus>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 50a233d5de..af8743d0f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1080,14 +1080,15 @@ public static ParsedDelta parsedDelta(Path deltaDir, boolean isRawFormat) {
 
   /**
    * Is the given directory in ACID format?
+   * @param fileSystem file system instance
    * @param directory the partition directory to check
   * @param conf the query configuration
    * @return true, if it is an ACID directory
    * @throws IOException
    */
-  public static boolean isAcid(Path directory,
+  public static boolean isAcid(FileSystem fileSystem, Path directory,
                                Configuration conf) throws IOException {
-    FileSystem fs = directory.getFileSystem(conf);
+    FileSystem fs = fileSystem == null ? directory.getFileSystem(conf) : fileSystem;
     for(FileStatus file: fs.listStatus(directory)) {
       String filename = file.getPath().getName();
       if (filename.startsWith(BASE_PREFIX) ||
@@ -1106,7 +1107,7 @@ public static Directory getAcidState(Path directory,
                                        Configuration conf,
                                        ValidWriteIdList writeIdList
                                        ) throws IOException {
-    return getAcidState(directory, conf, writeIdList, false, false);
+    return getAcidState(null, directory, conf, writeIdList, false, false);
   }
 
   /** State class for getChildState; cannot modify 2 things in a method. */
@@ -1122,22 +1123,23 @@ public static Directory getAcidState(Path directory,
    * base and diff directories. Note that because major compactions don't
    * preserve the history, we can't use a base directory that includes a
    * write id that we must exclude.
+   * @param fileSystem file system instance
    * @param directory the partition directory to analyze
    * @param conf the configuration
    * @param writeIdList the list of write ids that we are reading
    * @return the state of the directory
    * @throws IOException
    */
-  public static Directory getAcidState(Path directory,
+  public static Directory getAcidState(FileSystem fileSystem, Path directory,
                                         Configuration conf,
                                         ValidWriteIdList writeIdList,
                                         boolean useFileIds,
                                         boolean ignoreEmptyFiles
                                         ) throws IOException {
-    return getAcidState(directory, conf, writeIdList, Ref.from(useFileIds), ignoreEmptyFiles, null);
+    return getAcidState(fileSystem, directory, conf, writeIdList, Ref.from(useFileIds), ignoreEmptyFiles, null);
   }
 
-  public static Directory getAcidState(Path directory,
+  public static Directory getAcidState(FileSystem fileSystem, Path directory,
                                         Configuration conf,
                                         ValidWriteIdList writeIdList,
                                         Ref<Boolean> useFileIds,
@@ -1160,7 +1162,7 @@ public static Directory getAcidState(Path directory,
       validTxnList.readFromString(s);
     }
 
-    FileSystem fs = directory.getFileSystem(conf);
+    FileSystem fs = fileSystem == null ? directory.getFileSystem(conf) : fileSystem;
     // The following 'deltas' includes all kinds of delta files including insert & delete deltas.
     final List<ParsedDelta> deltas = new ArrayList<ParsedDelta>();
     List<ParsedDelta> working = new ArrayList<ParsedDelta>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 11876fbb10..0287bd363f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -187,8 +187,8 @@ public void initIOContext(FileSplit split, JobConf job,
     long blockStart = -1;
     FileSplit fileSplit = split;
     Path path = fileSplit.getPath();
-    FileSystem fs = path.getFileSystem(job);
     if (inputFormatClass.getName().contains("SequenceFile")) {
+      FileSystem fs = path.getFileSystem(job);
       SequenceFile.Reader in = new SequenceFile.Reader(fs, path, job);
       blockPointer = in.isBlockCompressed();
       in.sync(fileSplit.getStart());
@@ -198,6 +198,7 @@ public void initIOContext(FileSplit split, JobConf job,
       blockPointer = true;
       blockStart = ((RCFileRecordReader) recordReader).getStart();
     } else if (inputFormatClass.getName().contains("RCFile")) {
+      FileSystem fs = path.getFileSystem(job);
       blockPointer = true;
       RCFile.Reader in = new RCFile.Reader(fs, path, job);
       in.sync(fileSplit.getStart());
@@ -205,7 +206,7 @@ public void initIOContext(FileSplit split, JobConf job,
       in.close();
     }
     this.jobConf = job;
-    this.initIOContext(blockStart, blockPointer, path.makeQualified(fs));
+    this.initIOContext(blockStart, blockPointer, path);
 
     this.initIOContextSortedProps(split, recordReader, job);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 6bac285c15..086e59f255 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -604,7 +604,7 @@ private static void processForWriteIds(Path dir, Configuration conf,
     }
     if (hasAcidDirs) {
       AcidUtils.Directory dirInfo = AcidUtils.getAcidState(
-          dir, conf, validWriteIdList, Ref.from(false), true, null);
+          fs, dir, conf, validWriteIdList, Ref.from(false), true, null);
 
       // Find the base, created for IOW.
       Path base = dirInfo.getBaseDirectory();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 9dac185067..81981705ef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -18,11 +18,6 @@ package org.apache.hadoop.hive.ql.io.orc;
 
-import org.apache.hadoop.hive.common.BlobStorageUtils;
-import org.apache.hadoop.hive.common.NoDynamicValuesException;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
@@ -49,6 +44,10 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hive.common.BlobStorageUtils;
+import org.apache.hadoop.hive.common.NoDynamicValuesException;
 import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -120,7 +119,6 @@ import org.apache.orc.FileFormatException;
 import org.apache.orc.OrcProto;
 import org.apache.orc.OrcProto.Footer;
-import org.apache.orc.OrcProto.Type;
 import org.apache.orc.OrcUtils;
 import org.apache.orc.StripeInformation;
 import org.apache.orc.StripeStatistics;
@@ -194,7 +192,7 @@
   @Override
   public boolean shouldSkipCombine(Path path,
                                    Configuration conf) throws IOException {
-    return (conf.get(AcidUtils.CONF_ACID_KEY) != null) || AcidUtils.isAcid(path, conf);
+    return (conf.get(AcidUtils.CONF_ACID_KEY) != null) || AcidUtils.isAcid(null, path, conf);
   }
 
@@ -626,6 +624,8 @@ public boolean validateInput(FileSystem fs, HiveConf conf,
     private SplitStrategyKind splitStrategyKind;
     private final SearchArgument sarg;
     private final AcidOperationalProperties acidOperationalProperties;
+    private final boolean isAcid;
+    private final boolean isVectorMode;
 
     Context(Configuration conf) throws IOException {
       this(conf, 1, null);
@@ -639,6 +639,8 @@ public boolean validateInput(FileSystem fs, HiveConf conf,
     Context(Configuration conf, final int minSplits, ExternalFooterCachesByConf efc)
        throws IOException {
      this.conf = conf;
+      this.isAcid = AcidUtils.isFullAcidScan(conf);
+      this.isVectorMode = Utilities.getIsVectorized(conf);
       this.forceThreadpool = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
       this.sarg = ConvertAstToSearchArg.createFromConf(conf);
       minSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, DEFAULT_MIN_SPLIT_SIZE);
@@ -647,7 +649,6 @@ public boolean validateInput(FileSystem fs, HiveConf conf,
       if (ss == null || ss.equals(SplitStrategyKind.HYBRID.name())) {
         splitStrategyKind = SplitStrategyKind.HYBRID;
       } else {
-        LOG.info("Enforcing " + ss + " ORC split strategy");
         splitStrategyKind = SplitStrategyKind.valueOf(ss);
       }
       footerInSplits = HiveConf.getBoolVar(conf,
@@ -655,7 +656,6 @@ public boolean validateInput(FileSystem fs, HiveConf conf,
       numBuckets = Math.max(conf.getInt(hive_metastoreConstants.BUCKET_COUNT, 0), 0);
       splitStrategyBatchMs = HiveConf.getIntVar(conf, ConfVars.HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS);
-      LOG.debug("Number of buckets specified by conf file is " + numBuckets);
       long cacheMemSize = HiveConf.getSizeVar(
           conf, ConfVars.HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE);
       int numThreads = HiveConf.getIntVar(conf,
           ConfVars.HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS);
@@ -717,8 +717,37 @@ public boolean validateInput(FileSystem fs, HiveConf conf,
       String value = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
       writeIdList = value == null ?
           new ValidReaderWriteIdList() : new ValidReaderWriteIdList(value);
-      LOG.debug("Context:: Read ValidWriteIdList: " + writeIdList.toString()
-              + " isTransactionalTable: " + isTxnTable + " properties: " + txnProperties);
+      LOG.info("Context:: " +
+          "isAcid: {} " +
+          "isVectorMode: {} " +
+          "sarg: {} " +
+          "minSplitSize: {} " +
+          "maxSplitSize: {} " +
+          "splitStrategy: {} " +
+          "footerInSplits: {} " +
+          "numBuckets: {} " +
+          "numThreads: {} " +
+          "cacheMemSize: {} " +
+          "cacheStripeDetails: {} " +
+          "useSoftReference: {} " +
+          "writeIdList: {} " +
+          "isTransactionalTable: {} " +
+          "txnProperties: {} ",
+          isAcid,
+          isVectorMode,
+          sarg,
+          minSize,
+          maxSize,
+          splitStrategyKind,
+          footerInSplits,
+          numBuckets,
+          numThreads,
+          cacheMemSize,
+          cacheStripeDetails,
+          useSoftReference,
+          writeIdList,
+          isTxnTable,
+          txnProperties);
     }
 
     @VisibleForTesting
@@ -1038,6 +1067,8 @@ private void runGetSplitsSync(List<Future<List<OrcSplit>>> splitFutures,
     private final boolean allowSyntheticFileIds;
     private final boolean isDefaultFs;
     private final Configuration conf;
+    private final boolean isAcid;
+    private final boolean vectorMode;
 
     /**
      * @param dir - root of partition dir
@@ -1054,13 +1085,13 @@ public BISplitStrategy(Context context, FileSystem fs, Path dir,
       this.allowSyntheticFileIds = allowSyntheticFileIds;
       this.isDefaultFs = isDefaultFs;
       this.conf = context.conf;
+      this.isAcid = context.isAcid;
+      this.vectorMode = context.isVectorMode;
     }
 
     @Override
     public List<OrcSplit> getSplits() throws IOException {
       List<OrcSplit> splits = Lists.newArrayList();
-      boolean isAcid = AcidUtils.isFullAcidScan(conf);
-      boolean vectorMode = Utilities.getIsVectorized(conf);
       OrcSplit.OffsetAndBucketProperty offsetAndBucket = null;
       for (HdfsFileStatusWithId file : fileStatuses) {
         if (isOriginal && isAcid && vectorMode) {
@@ -1077,7 +1108,9 @@ public BISplitStrategy(Context context, FileSystem fs, Path dir,
           }
           if (BlobStorageUtils.isBlobStorageFileSystem(conf, fs)) {
             final long splitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE);
-            LOG.info("Blob storage detected for BI split strategy. Splitting files at boundary {}..", splitSize);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Blob storage detected for BI split strategy. Splitting files at boundary {}..", splitSize);
+            }
             long start;
             for (start = 0; start < logicalLen; start = start + splitSize) {
               OrcSplit orcSplit = new OrcSplit(fileStatus.getPath(), fileKey, start,
@@ -1228,7 +1261,7 @@ private AcidDirInfo callInternal() throws IOException {
       }
       //todo: shouldn't ignoreEmptyFiles be set based on ExecutionEngine?
       AcidUtils.Directory dirInfo = AcidUtils.getAcidState(
-          dir, context.conf, context.writeIdList, useFileIds, true, null);
+          fs, dir, context.conf, context.writeIdList, useFileIds, true, null);
       // find the base files (original or new style)
       List<HdfsFileStatusWithId> baseFiles = new ArrayList<>();
       if (dirInfo.getBaseDirectory() == null) {
@@ -1543,8 +1576,8 @@ public String toString() {
     }
 
     private List<OrcSplit> callInternal() throws IOException {
-      boolean isAcid = AcidUtils.isFullAcidScan(context.conf);
-      boolean vectorMode = Utilities.getIsVectorized(context.conf);
+      boolean isAcid = context.isAcid;
+      boolean vectorMode = context.isVectorMode;
 
       if (isOriginal && isAcid && vectorMode) {
         offsetAndBucket = VectorizedOrcAcidRowBatchReader.computeOffsetAndBucket(file, rootDir, isOriginal,
@@ -1958,9 +1991,8 @@ private static void scheduleSplits(ETLSplitStrategy splitStrategy, Context conte
   @Override
   public InputSplit[] getSplits(JobConf job,
                                 int numSplits) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getSplits started");
-    }
+    long start = System.currentTimeMillis();
+    LOG.info("getSplits started");
     Configuration conf = job;
     if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED)) {
       // Create HiveConf once, since this is expensive.
@@ -1968,9 +2000,8 @@ private static void scheduleSplits(ETLSplitStrategy splitStrategy, Context conte
     }
     List<OrcSplit> result = generateSplitsInfo(conf,
        new Context(conf, numSplits, createExternalCaches()));
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getSplits finished");
-    }
+    long end = System.currentTimeMillis();
+    LOG.info("getSplits finished (#splits: {}). duration: {} ms", result.size(), (end - start));
     return result.toArray(new InputSplit[result.size()]);
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index 62a1061dfd..93db1f8296 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -463,7 +463,8 @@ static int encodeBucketId(Configuration conf, int bucketId, int statementId) {
        */
       //the split is from something other than the 1st file of the logical bucket - compute offset
       AcidUtils.Directory directoryState
-          = AcidUtils.getAcidState(mergerOptions.getRootPath(), conf, validWriteIdList, false, true);
+          = AcidUtils.getAcidState(null, mergerOptions.getRootPath(), conf, validWriteIdList, false,
+          true);
       for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) {
         int bucketIdFromPath = AcidUtils.parseBucketId(f.getFileStatus().getPath());
         if (bucketIdFromPath != bucketId) {
@@ -577,7 +578,7 @@ public void next(OrcStruct next) throws IOException {
     assert options.getOffset() == 0;
     assert options.getMaxOffset() == Long.MAX_VALUE;
     AcidUtils.Directory directoryState
-        = AcidUtils.getAcidState(mergerOptions.getRootPath(), conf, validWriteIdList, false, true);
+        = AcidUtils.getAcidState(null, mergerOptions.getRootPath(), conf, validWriteIdList, false, true);
     /**
      * Note that for reading base_x/ or delta_x_x/ with non-acid schema,
      * {@link Options#getRootPath()} is set to base_x/ or delta_x_x/ which causes all it's
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index 1795bb5457..e8b725b3df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -718,7 +718,7 @@ dropped by the Reader (I guess because of orc.impl.SchemaEvolution)
     int bucketProperty = BucketCodec.V1.encode(new AcidOutputFormat.Options(conf)
       //statementId is from directory name (or 0 if there is none)
       .statementId(syntheticTxnInfo.statementId).bucket(bucketId));
-    AcidUtils.Directory directoryState = AcidUtils.getAcidState( syntheticTxnInfo.folder, conf,
+    AcidUtils.Directory directoryState = AcidUtils.getAcidState(null, syntheticTxnInfo.folder, conf,
         validWriteIdList, false, true);
     for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) {
       int bucketIdFromPath = AcidUtils.parseBucketId(f.getFileStatus().getPath());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 10192859a7..f323c311ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -266,7 +266,7 @@ void run(HiveConf conf, String jobName, Table t, Partition p, StorageDescriptor
     // and discovering that in getSplits is too late as we then have no way to pass it to our
     // mapper.
 
-    AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds, false, true);
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, writeIds, false, true);
     List<AcidUtils.ParsedDelta> parsedDeltas = dir.getCurrentDirectories();
     int maxDeltastoHandle = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA);
     if(parsedDeltas.size() > maxDeltastoHandle) {
@@ -345,7 +345,8 @@ void run(HiveConf conf, String jobName, Table t, Partition p, StorageDescriptor
   private void runCrudCompaction(HiveConf hiveConf, Table t, Partition p, StorageDescriptor sd, ValidWriteIdList writeIds,
       CompactionInfo ci) throws IOException {
     AcidUtils.setAcidOperationalProperties(hiveConf, true, AcidUtils.getAcidOperationalProperties(t.getParameters()));
-    AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), hiveConf, writeIds, Ref.from(false), false,
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, new Path(sd.getLocation()), hiveConf, writeIds,
+        Ref.from(false), false,
         t.getParameters());
     int deltaCount = dir.getCurrentDirectories().size();
     int origCount = dir.getOriginalFiles().size();
@@ -415,7 +416,7 @@ private void runMmCompaction(HiveConf conf, Table t, Partition p,
       StorageDescriptor sd, ValidWriteIdList writeIds, CompactionInfo ci) throws IOException {
     LOG.debug("Going to delete directories for aborted transactions for MM table " + t.getDbName() + "."
         + t.getTableName());
-    AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()),
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, new Path(sd.getLocation()),
         conf, writeIds, Ref.from(false), false, t.getParameters());
     removeFilesForMmTable(conf, dir);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index deabec6f87..6168fc0f79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -291,7 +291,7 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidWriteIdLi
     boolean noBase = false;
     Path location = new Path(sd.getLocation());
     FileSystem fs = location.getFileSystem(conf);
-    AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, writeIds, false, false);
+    AcidUtils.Directory dir = AcidUtils.getAcidState(fs, location, conf, writeIds, false, false);
     Path base = dir.getBaseDirectory();
     long baseSize = 0;
     FileStatus stat = null;
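
For reviewers, a minimal sketch of the call pattern introduced by this patch: an already-obtained FileSystem can be passed to AcidUtils.isAcid/getAcidState so they skip the extra Path.getFileSystem(conf) lookup, while passing null keeps the old behaviour of deriving it from the directory. The class name, argument handling, and variable names below are illustrative and not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.ql.io.AcidUtils;

public class AcidStateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path partitionPath = new Path(args[0]);                            // illustrative input
    ValidWriteIdList writeIds = new ValidReaderWriteIdList(args[1]);   // illustrative write id list string

    // Reuse a FileSystem handle the caller already holds; AcidUtils will not
    // call directory.getFileSystem(conf) again.
    FileSystem fs = partitionPath.getFileSystem(conf);
    boolean acid = AcidUtils.isAcid(fs, partitionPath, conf);
    AcidUtils.Directory dir = AcidUtils.getAcidState(fs, partitionPath, conf, writeIds, false, false);

    // Passing null falls back to the pre-patch behaviour: the FileSystem is
    // resolved from the directory inside AcidUtils.
    AcidUtils.Directory sameDir = AcidUtils.getAcidState(null, partitionPath, conf, writeIds, false, false);

    System.out.println(acid + " " + dir.getBaseDirectory() + " " + sameDir.getBaseDirectory());
  }
}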