diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 23ae0dc..25dfb60 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1551,6 +1551,8 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f,
         "Percentage (fractional) size of the delta files relative to the base that will trigger\n" +
         "a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"),
+    COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " +
+        "the compactor will attempt to handle in a single job."),
     HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
         "Number of aborted transactions involving a given table or partition that will trigger\n" +
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 30db513..e8d070c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -132,6 +132,9 @@ public static String deltaSubdir(long min, long max, int statementId) {
     return deltaSubdir(min, max) + "_" + String.format(STATEMENT_DIGITS, statementId);
   }
 
+  public static String baseDir(long txnId) {
+    return BASE_PREFIX + String.format(DELTA_DIGITS, txnId);
+  }
   /**
    * Create a filename for a bucket file.
    * @param directory the partition directory
@@ -221,14 +224,16 @@ static long parseBase(Path path) {
     Path getBaseDirectory();
 
     /**
-     * Get the list of original files.
+     * Get the list of original files.  Not {@code null}.
      * @return the list of original files (eg. 000000_0)
      */
     List<FileStatus> getOriginalFiles();
 
     /**
      * Get the list of base and delta directories that are valid and not
-     * obsolete.
+     * obsolete.  Not {@code null}.  The list must be sorted in a specific way.
+     * See {@link org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta#compareTo(org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta)}
+     * for details.
      * @return the minimal list of current directories
      */
     List<ParsedDelta> getCurrentDirectories();
@@ -237,7 +242,7 @@ static long parseBase(Path path) {
      * Get the list of obsolete directories. After filtering out bases and
      * deltas that are not selected by the valid transaction list, return the
      * list of original files, bases, and deltas that have been replaced by
-     * more up to date ones.
+     * more up to date ones.  Not {@code null}.
      */
     List<FileStatus> getObsolete();
   }
@@ -284,6 +289,7 @@ public int getStatementId() {
      * happens in a different process; thus it's possible to have bases/deltas with
      * overlapping txnId boundaries.  The sort order helps figure out the "best" set of files
      * to use to get data.
+     * This sorts a "wider" delta before a "narrower" one, i.e. delta_5_20 sorts before delta_5_10 (and delta_11_20).
      */
     @Override
     public int compareTo(ParsedDelta parsedDelta) {
@@ -499,6 +505,9 @@ public static Directory getAcidState(Path directory,
     }
     Collections.sort(working);
+    //so now, 'working' should be sorted like delta_5_20 delta_5_10 delta_11_20 delta_51_60 for example
+    //and we want to end up with the best set containing all relevant data: delta_5_20 delta_51_60,
+    //subject to the list of 'exceptions' in 'txnList' (not shown in the example above).
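+    //walking the example above (assuming no relevant exceptions in 'txnList'): delta_5_20 is kept,
+    //delta_5_10 and delta_11_20 are skipped since they cover no transactions beyond it, and
+    //delta_51_60 is kept, leaving the best set delta_5_20 delta_51_60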
     long current = bestBase.txn;
     int lastStmtId = -1;
     for(ParsedDelta next: working) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 391f99a..bab01a9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
@@ -94,18 +95,8 @@ public CompactorMR() {
   }
 
-  /**
-   * Run a compactor job.
-   * @param conf Hive configuration file
-   * @param jobName name to run this job with
-   * @param t metastore table
-   * @param sd metastore storage descriptor
-   * @param txns list of valid transactions
-   * @param isMajor is this a major compaction?
-   * @throws java.io.IOException if the job fails
-   */
-  void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
-           ValidTxnList txns, boolean isMajor, Worker.StatsUpdater su) throws IOException {
+  private JobConf createBaseJobConf(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
+                                    ValidTxnList txns) {
     JobConf job = new JobConf(conf);
     job.setJobName(jobName);
     job.setOutputKeyClass(NullWritable.class);
@@ -117,7 +108,7 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
     job.setInputFormat(CompactorInputFormat.class);
     job.setOutputFormat(NullOutputFormat.class);
     job.setOutputCommitter(CompactorOutputCommitter.class);
-
+
     String queueName = conf.getVar(HiveConf.ConfVars.COMPACTOR_JOB_QUEUE);
     if(queueName != null && queueName.length() > 0) {
       job.setQueueName(queueName);
@@ -127,23 +118,63 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
     job.set(TMP_LOCATION, sd.getLocation() + "/" + TMPDIR + "_" + UUID.randomUUID().toString());
     job.set(INPUT_FORMAT_CLASS_NAME, sd.getInputFormat());
     job.set(OUTPUT_FORMAT_CLASS_NAME, sd.getOutputFormat());
-    job.setBoolean(IS_MAJOR, isMajor);
     job.setBoolean(IS_COMPRESSED, sd.isCompressed());
     job.set(TABLE_PROPS, new StringableMap(t.getParameters()).toString());
     job.setInt(NUM_BUCKETS, sd.getNumBuckets());
     job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
     setColumnTypes(job, sd.getCols());
+    return job;
+  }
+  /**
+   * Run Compaction which may consist of several jobs on the cluster.
+   * @param conf Hive configuration file
+   * @param jobName name to run this job with
+   * @param t metastore table
+   * @param sd metastore storage descriptor
+   * @param txns list of valid transactions
+   * @param ci CompactionInfo
+   * @throws java.io.IOException if the job fails
+   */
+  void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
+           ValidTxnList txns, CompactionInfo ci, Worker.StatsUpdater su) throws IOException {
+    JobConf job = createBaseJobConf(conf, jobName, t, sd, txns);
     // Figure out and encode what files we need to read.  We do this here (rather than in
     // getSplits below) because as part of this we discover our minimum and maximum transactions,
     // and discovering that in getSplits is too late as we then have no way to pass it to our
     // mapper.
-    AcidUtils.Directory dir = AcidUtils.getAcidState(
-        new Path(sd.getLocation()), conf, txns, false);
+    AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns, false);
+    List<AcidUtils.ParsedDelta> parsedDeltas = dir.getCurrentDirectories();
+    int maxDeltastoHandle = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA);
+    if(parsedDeltas.size() > maxDeltastoHandle) {
+      /**
+       * If we are here, that means we have a very high number of delta files.  This may be a sign of
+       * a temporary glitch or a real issue.  For example, if the transaction batch size or transaction
+       * size is set too low for the event flow rate in the Streaming API, it may generate lots of delta
+       * files very quickly.  Another possibility is that Compaction is repeatedly failing and not
+       * actually compacting.  Thus, force N minor compactions first to reduce the number of deltas and
+       * then follow up with the compaction actually requested in {@link ci}, which now needs to compact
+       * a lot fewer deltas.
+       */
+      LOG.warn(parsedDeltas.size() + " delta files found for " + ci.getFullPartitionName()
+        + " located at " + sd.getLocation() + "! This is likely a sign of misconfiguration, " +
+        "especially if this message repeats.  Check that compaction is running properly.  Check for any " +
+        "runaway/mis-configured process writing to ACID tables, especially using Streaming Ingest API.");
+      int numMinorCompactions = parsedDeltas.size() / maxDeltastoHandle;
+      for(int jobSubId = 0; jobSubId < numMinorCompactions; jobSubId++) {
+        JobConf jobMinorCompact = createBaseJobConf(conf, jobName + "_" + jobSubId, t, sd, txns);
+        launchCompactionJob(jobMinorCompact,
+          null, CompactionType.MINOR, null,
+          parsedDeltas.subList(jobSubId * maxDeltastoHandle, (jobSubId + 1) * maxDeltastoHandle),
+          maxDeltastoHandle, -1);
+      }
+      //now recompute state since we've done minor compactions and have a different 'best' set of deltas
+      dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns);
+    }
+
     StringableList dirsToSearch = new StringableList();
     Path baseDir = null;
-    if (isMajor) {
+    if (ci.isMajorCompaction()) {
       // There may not be a base dir if the partition was empty before inserts or if this
       // partition is just now being converted to ACID.
       baseDir = dir.getBaseDirectory();
@@ -166,14 +197,26 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
       }
     }
 
-    List<AcidUtils.ParsedDelta> parsedDeltas = dir.getCurrentDirectories();
-
-    if (parsedDeltas == null || parsedDeltas.size() == 0) {
+    if (parsedDeltas.size() == 0) {
       // Seriously, no deltas?  Can't compact that.
       LOG.error(
           "No delta files found to compact in " + sd.getLocation());
+      //couldn't someone want to run a Major compaction just to convert an old table to ACID?
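+      //note that this also means a partition with only original (pre-ACID) files and no deltas is
+      //skipped here rather than being rewritten into a base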
       return;
     }
+    launchCompactionJob(job, baseDir, ci.type, dirsToSearch, dir.getCurrentDirectories(),
+      dir.getCurrentDirectories().size(), dir.getObsolete().size());
+
+    su.gatherStats();
+  }
+  private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compactionType,
+                                   StringableList dirsToSearch,
+                                   List<AcidUtils.ParsedDelta> parsedDeltas,
+                                   int curDirNumber, int obsoleteDirNumber) throws IOException {
+    job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
+    if(dirsToSearch == null) {
+      dirsToSearch = new StringableList();
+    }
     StringableList deltaDirs = new StringableList();
     long minTxn = Long.MAX_VALUE;
     long maxTxn = Long.MIN_VALUE;
@@ -190,18 +233,15 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd,
     job.set(DIRS_TO_SEARCH, dirsToSearch.toString());
     job.setLong(MIN_TXN, minTxn);
     job.setLong(MAX_TXN, maxTxn);
-    LOG.debug("Setting minimum transaction to " + minTxn);
-    LOG.debug("Setting maximume transaction to " + maxTxn);
+    LOG.info("Submitting " + compactionType + " compaction job '" +
+      job.getJobName() + "' to " + job.getQueueName() + " queue. " +
+      "(current delta dirs count=" + curDirNumber +
+      ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
 
     RunningJob rj = JobClient.runJob(job);
-    LOG.info("Submitted " + (isMajor ? CompactionType.MAJOR : CompactionType.MINOR) + " compaction job '" +
-      jobName + "' with jobID=" + rj.getID() + " to " + job.getQueueName() + " queue. " +
-      "(current delta dirs count=" + dir.getCurrentDirectories().size() +
-      ", obsolete delta dirs count=" + dir.getObsolete());
+    LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID());
     rj.waitForCompletion();
-    su.gatherStats();
   }
-
   /**
    * Set the column names and types into the job conf for the input format
    * to use.
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 0548117..cc7441a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -77,7 +77,7 @@ public void run() {
       // Make sure nothing escapes this run method and kills the metastore at large,
       // so wrap it in a big catch Throwable statement.
       try {
-        CompactionInfo ci = txnHandler.findNextToCompact(name);
+        final CompactionInfo ci = txnHandler.findNextToCompact(name);
 
         if (ci == null && !stop.get()) {
           try {
@@ -158,14 +158,14 @@ public void run() {
           launchedJob = true;
           try {
             if (runJobAsSelf(runAs)) {
-              mr.run(conf, jobName.toString(), t, sd, txns, isMajor, su);
+              mr.run(conf, jobName.toString(), t, sd, txns, ci, su);
             } else {
               UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
                 UserGroupInformation.getLoginUser());
               ugi.doAs(new PrivilegedExceptionAction<Object>() {
                 @Override
                 public Object run() throws Exception {
-                  mr.run(conf, jobName.toString(), t, sd, txns, isMajor, su);
+                  mr.run(conf, jobName.toString(), t, sd, txns, ci, su);
                   return null;
                 }
               });
diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 11e5333..5bd73a9 100644
--- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -29,6 +30,7 @@
 import java.io.*;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -508,6 +510,97 @@ public void majorTableWithBase() throws Exception {
   }
 
   @Test
+  public void minorNoBaseLotsOfDeltas() throws Exception {
+    compactNoBaseLotsOfDeltas(CompactionType.MINOR);
+  }
+  @Test
+  public void majorNoBaseLotsOfDeltas() throws Exception {
+    compactNoBaseLotsOfDeltas(CompactionType.MAJOR);
+  }
+  private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception {
+    conf.setIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA, 2);
+    Table t = newTable("default", "mapwb", true);
+    Partition p = newPartition(t, "today");
+
+//    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+    addDeltaFile(t, p, 25L, 29L, 2);//make it look like a streaming API use case
+    addDeltaFile(t, p, 31L, 32L, 3);
+    addDeltaFile(t, p, 31L, 33L, 5);//make it look like 31-32 has been compacted, but not cleaned
+    addDeltaFile(t, p, 34L, 35L, 1);
+
+    /* Since COMPACTOR_MAX_NUM_DELTA=2,
+     * we expect files 1,2 to be minor compacted by the 1st job to produce delta_21_24,
+     * and files 3,5 to be minor compacted by the 2nd job (file 4 is obsolete) to make delta_25_33,
+     *
+     * and then the 'requested'
+     * minor compaction to combine delta_21_24, delta_25_33 and delta_34_35 into delta_21_35,
+     * or major compaction to create base_35. */
+    burnThroughTransactions(35);
+    CompactionRequest rqst = new CompactionRequest("default", "mapwb", type);
+    rqst.setPartitionname("ds=today");
+    txnHandler.compact(rqst);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
+
+    FileSystem fs = FileSystem.get(conf);
+    FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
+    Assert.assertEquals(9, stat.length);
+
+    // Find the new delta file and make sure it has the right contents
+    BitSet matchesFound = new BitSet(9);
+    for (int i = 0; i < stat.length; i++) {
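+      //slots 0-5 track the six original delta dirs, 6-7 the deltas produced by the two forced minor
+      //compactions, and 8 the final result of the requested compaction (delta_21_35 or base_35)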
+      if(stat[i].getPath().getName().equals(makeDeltaDirName(21,22))) {
+        matchesFound.set(0);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirName(23, 24))) {
+        matchesFound.set(1);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(25, 29))) {
+        matchesFound.set(2);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirName(31, 32))) {
+        matchesFound.set(3);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(31, 33))) {
+        matchesFound.set(4);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirName(34, 35))) {
+        matchesFound.set(5);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21,24))) {
+        matchesFound.set(6);
+      }
+      else if(stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(25,33))) {
+        matchesFound.set(7);
+      }
+      switch (type) {
+        //yes, both do set(8)
+        case MINOR:
+          if(stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21,35))) {
+            matchesFound.set(8);
+          }
+          break;
+        case MAJOR:
+          if(stat[i].getPath().getName().equals(AcidUtils.baseDir(35))) {
+            matchesFound.set(8);
+          }
+          break;
+        default:
+          throw new IllegalStateException();
+      }
+    }
+    for(int i = 0; i < stat.length; i++) {
+      Assert.assertEquals("Some file is missing: " + i, true, matchesFound.get(i));
+    }
+  }
+  @Test
   public void majorPartitionWithBase() throws Exception {
     LOG.debug("Starting majorPartitionWithBase");
     Table t = newTable("default", "mapwb", true);