diff --git a/common/pom.xml b/common/pom.xml
index 8141f75df6b649c0d9f66fd7c6efa2989e42938a..cc47257a0861aeecbc67715d76921fd87d5a0dc0 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -64,6 +64,17 @@
       <groupId>org.eclipse.jetty.aggregate</groupId>
       <artifactId>jetty-all</artifactId>
       <version>${jetty.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty.orbit</groupId>
+      <artifactId>javax.servlet</artifactId>
+      <version>${javax-servlet.version}</version>
     </dependency>
     <dependency>
       <groupId>joda-time</groupId>
@@ -100,36 +111,44 @@
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>commons-collections</groupId>
-          <artifactId>commons-collections</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commmons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-      </exclusions>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-collections</groupId>
+          <artifactId>commons-collections</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commmons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commmons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-      </exclusions>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commmons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 2c25cae570b33dd03e33be3df45a964cc0a7f071..97fe7bca2f541438ef32c0e21c6e8700d3e29e65 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -36,15 +36,14 @@
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import javax.security.auth.login.LoginException;
-
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
+import org.apache.hadoop.hive.conf.Validator.SizeValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.shims.Utils;
@@ -971,6 +970,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", false,
"Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
+ HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
+ "internal usage only -- do transaction (ACID) table scan.", true),
+
HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
"A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
@@ -993,6 +995,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false),
+ HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false),
HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
"Merge small files at the end of a map-only job"),
@@ -1571,11 +1574,32 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
"Number of aborted transactions involving a given table or partition that will trigger\n" +
"a major compaction."),
-
+
+ COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2,
+ new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
+ "after which automatic compactions will not be scheduled any more. Note that this must be less " +
+ "than hive.compactor.history.retention.failed."),
+
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" +
"Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."),
+
+ COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3,
+ new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
+ "retained in compaction history for a given table/partition."),
+
+ COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3,
+ new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
+ "retained in compaction history for a given table/partition."),
+
+ COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2,
+ new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
+ "retained in compaction history for a given table/partition."),
+
+ COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m",
+ new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"),
+
HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
@@ -1649,7 +1673,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false,
"enable or disable the Hive client authorization"),
HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
- "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider",
+ "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
"The Hive client authorization manager class name. The user defined authorization class should implement \n" +
"interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
@@ -1900,6 +1924,10 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h",
new TimeValidator(TimeUnit.HOURS),
"The jitter for Tez session lifetime; prevents all the sessions from restarting at once."),
+ HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16,
+ "If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" +
+ "threads to use to initialize the default sessions."),
+
// Operation log configuration
HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
@@ -2260,7 +2288,6 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
"Whether to generate the splits locally or in the AM (tez only)"),
HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
"Whether to generate consistent split locations when generating splits in the AM"),
-
HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
@@ -2346,18 +2373,18 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
"LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
"custom off-heap allocator, 'allocator' uses the custom allocator without the caches,\n" +
"'none' doesn't use either (this mode may result in significant performance degradation)"),
- LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", 128 * 1024,
+ LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "128Kb", new SizeValidator(),
"Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
"padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
"compression buffer size, or next lowest power of 2. Must be a power of 2."),
- LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", 16 * 1024 * 1024,
+ LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
"Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
"the largest expected ORC compression buffer size. Must be a power of 2."),
LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
"Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
"(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
"not the case, an adjusted size will be used. Using powers of 2 is recommended."),
- LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", 1024L * 1024 * 1024,
+ LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(),
"Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
"Whether ORC low-level cache should use direct allocation."),
@@ -2473,7 +2500,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
new TimeValidator(TimeUnit.SECONDS),
"How long to delay before cleaning up query files in LLAP (in seconds, for debugging).",
"llap.file.cleanup.delay-seconds"),
- LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", "",
+ LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null,
"Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" +
"YARN registry is used.", "llap.daemon.service.hosts"),
LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s",
@@ -2483,6 +2510,8 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10,
"Number of threads to use in LLAP task communicator in Tez AM.",
"llap.daemon.communicator.num.threads"),
+ LLAP_DAEMON_ALLOW_PERMANENT_FNS("hive.llap.daemon.allow.permanent.fns", true,
+ "Whether LLAP daemon should localize the resources for permanent UDFs."),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms",
new TimeValidator(TimeUnit.MILLISECONDS),
@@ -2540,6 +2569,10 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
"llap.daemon.service.port"),
LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false,
"Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"),
+ LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits",
+ false,
+ "Whether to setup split locations to match nodes on which llap daemons are running," +
+ " instead of using the locations provided by the split itself"),
SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
@@ -2935,6 +2968,14 @@ public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
setTimeVar(this, var, time, outUnit);
}
+ public static long getSizeVar(Configuration conf, ConfVars var) {
+ return toSizeBytes(getVar(conf, var));
+ }
+
+ public long getSizeVar(ConfVars var) {
+ return getSizeVar(this, var);
+ }
+
private static TimeUnit getDefaultTimeUnit(ConfVars var) {
TimeUnit inputUnit = null;
if (var.validator instanceof TimeValidator) {
@@ -2944,11 +2985,16 @@ private static TimeUnit getDefaultTimeUnit(ConfVars var) {
}
public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
- String[] parsed = parseTime(value.trim());
+ String[] parsed = parseNumberFollowedByUnit(value.trim());
return outUnit.convert(Long.valueOf(parsed[0].trim().trim()), unitFor(parsed[1].trim(), inputUnit));
}
- private static String[] parseTime(String value) {
+ public static long toSizeBytes(String value) {
+ String[] parsed = parseNumberFollowedByUnit(value.trim());
+ return Long.valueOf(parsed[0].trim()) * multiplierFor(parsed[1].trim());
+ }
+
+ private static String[] parseNumberFollowedByUnit(String value) {
char[] chars = value.toCharArray();
int i = 0;
for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
@@ -2981,6 +3027,25 @@ public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
throw new IllegalArgumentException("Invalid time unit " + unit);
}
+
+ public static long multiplierFor(String unit) {
+ unit = unit.trim().toLowerCase();
+ if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) {
+ return 1;
+ } else if (unit.equals("kb")) {
+ return 1024;
+ } else if (unit.equals("mb")) {
+ return 1024*1024;
+ } else if (unit.equals("gb")) {
+ return 1024*1024*1024;
+ } else if (unit.equals("tb")) {
+ return 1024*1024*1024*1024;
+ } else if (unit.equals("pb")) {
+ return 1024*1024*1024*1024*1024;
+ }
+ throw new IllegalArgumentException("Invalid size unit " + unit);
+ }
+
public static String stringFor(TimeUnit timeunit) {
switch (timeunit) {
case DAYS: return "day";
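
The hunk above adds byte-size configuration support: values such as "128Kb", "16Mb" and "1Gb" are split into a number and a unit by parseNumberFollowedByUnit(), converted to bytes by toSizeBytes()/multiplierFor(), and read through the new getSizeVar() helpers. Below is a minimal, self-contained sketch of that conversion rule; it re-implements the split locally rather than calling the patched HiveConf, and the class name is illustrative.

```java
public class SizeParseSketch {
  // Mirrors HiveConf.multiplierFor: blank/"b"/"bytes", kb, mb, gb, tb, pb.
  static long multiplierFor(String unit) {
    unit = unit.trim().toLowerCase();
    if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) return 1L;
    if (unit.equals("kb")) return 1024L;
    if (unit.equals("mb")) return 1024L * 1024;
    if (unit.equals("gb")) return 1024L * 1024 * 1024;
    if (unit.equals("tb")) return 1024L * 1024 * 1024 * 1024;
    if (unit.equals("pb")) return 1024L * 1024 * 1024 * 1024 * 1024;
    throw new IllegalArgumentException("Invalid size unit " + unit);
  }

  // Mirrors HiveConf.toSizeBytes: the leading signed digits are the number, the rest is the unit.
  static long toSizeBytes(String value) {
    String v = value.trim();
    int i = 0;
    while (i < v.length() && (v.charAt(i) == '-' || Character.isDigit(v.charAt(i)))) {
      i++;
    }
    return Long.parseLong(v.substring(0, i).trim()) * multiplierFor(v.substring(i));
  }

  public static void main(String[] args) {
    System.out.println(toSizeBytes("128Kb")); // 131072 (hive.llap.io.allocator.alloc.min default)
    System.out.println(toSizeBytes("16Mb"));  // 16777216
    System.out.println(toSizeBytes("1Gb"));   // 1073741824
  }
}
```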
diff --git a/common/src/java/org/apache/hadoop/hive/conf/Validator.java b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
index 04a305dbf15376c116ae660cdb8a42bc948ae6e3..3fb09b982335b07de2d64c9c16adfe20bb573593 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Validator.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
@@ -281,4 +281,69 @@ private String timeString(long time) {
return time + " " + HiveConf.stringFor(timeUnit);
}
}
+
+
+ class SizeValidator implements Validator {
+
+ private final Long min;
+ private final boolean minInclusive;
+
+ private final Long max;
+ private final boolean maxInclusive;
+
+ public SizeValidator() {
+ this(null, false, null, false);
+ }
+
+ public SizeValidator(Long min, boolean minInclusive, Long max, boolean maxInclusive) {
+ this.min = min;
+ this.minInclusive = minInclusive;
+ this.max = max;
+ this.maxInclusive = maxInclusive;
+ }
+
+ @Override
+ public String validate(String value) {
+ try {
+ long size = HiveConf.toSizeBytes(value);
+ if (min != null && (minInclusive ? size < min : size <= min)) {
+ return value + " is smaller than " + sizeString(min);
+ }
+ if (max != null && (maxInclusive ? size > max : size >= max)) {
+ return value + " is bigger than " + sizeString(max);
+ }
+ } catch (Exception e) {
+ return e.toString();
+ }
+ return null;
+ }
+
+ public String toDescription() {
+ String description =
+ "Expects a byte size value with unit (blank for bytes, kb, mb, gb, tb, pb)";
+ if (min != null && max != null) {
+ description += ".\nThe size should be in between " +
+ sizeString(min) + (minInclusive ? " (inclusive)" : " (exclusive)") + " and " +
+ sizeString(max) + (maxInclusive ? " (inclusive)" : " (exclusive)");
+ } else if (min != null) {
+ description += ".\nThe time should be bigger than " +
+ (minInclusive ? "or equal to " : "") + sizeString(min);
+ } else if (max != null) {
+ description += ".\nThe size should be smaller than " +
+ (maxInclusive ? "or equal to " : "") + sizeString(max);
+ }
+ return description;
+ }
+
+ private String sizeString(long size) {
+ final String[] units = { " bytes", "Kb", "Mb", "Gb", "Tb" };
+ long current = 1;
+ for (int i = 0; i < units.length && current > 0; ++i) {
+ long next = current << 10;
+ if ((size & (next - 1)) != 0) return (long)(size / current) + units[i];
+ current = next;
+ }
+ return current > 0 ? ((long)(size / current) + "Pb") : (size + units[0]);
+ }
+ }
}
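
A short usage sketch for the new SizeValidator, assuming the patched hive-common classes are on the classpath: validate() returns null for an acceptable value and a human-readable message otherwise. The example class name and the chosen bounds are illustrative.

```java
import org.apache.hadoop.hive.conf.Validator.SizeValidator;

public class SizeValidatorExample {
  public static void main(String[] args) {
    // Unbounded validator, as used by the LLAP allocator settings above.
    SizeValidator any = new SizeValidator();
    System.out.println(any.validate("128Kb"));      // null => valid
    System.out.println(any.validate("128 potato")); // IllegalArgumentException message

    // Bounded validator: size must lie in [4Kb, 1Mb).
    SizeValidator bounded = new SizeValidator(4096L, true, 1024L * 1024, false);
    System.out.println(bounded.validate("64Kb"));   // null => valid
    System.out.println(bounded.validate("2Mb"));    // "2Mb is bigger than 1Mb"
  }
}
```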
diff --git a/data/files/windowing_distinct.txt b/data/files/windowing_distinct.txt
new file mode 100644
index 0000000000000000000000000000000000000000..927120220f0a9558e26e28dfebc9236000a6a706
--- /dev/null
+++ b/data/files/windowing_distinct.txt
@@ -0,0 +1,6 @@
+1|19|442|65553|4294967380|26.43|37.77|true|alice zipper|2013-03-01 09:11:58.703217|29.62|^Ahistory^B
+2|124|336|65664|4294967435|74.72|42.47|true|bob davidson|2013-03-01 09:11:58.703302|45.40|^Ayard duty^B
+1|19|442|65553|4294967380|26.43|37.77|true|alice zipper|2013-03-01 09:11:58.703217|29.62|^Ahistory^B
+1|35|387|65619|4294967459|96.91|18.86|false|katie davidson|2013-03-01 09:11:58.703079|27.32|^Ahistory^B
+2|111|372|65656|4294967312|13.01|34.95|false|xavier quirinius|2013-03-01 09:11:58.703310|23.91|^Atopology^B
+2|124|336|65664|4294967435|74.72|42.47|true|bob davidson|2013-03-01 09:11:58.703302|45.40|^Ayard duty^B
diff --git a/errata.txt b/errata.txt
index 6ac1d41aa1476553a77d0c5a58f4a4c7c3d891f7..9d52cdf5283045663a931648400ff8703833223c 100644
--- a/errata.txt
+++ b/errata.txt
@@ -76,3 +76,5 @@ fc6be8faf5c97901ccad33edca8f8f80023b308a llap HIVE-12151 https://issues.ap
c6565f5d65da9ed5cb452db7e313d0ce7abc1105 llap HIVE-9729 https://issues.apache.org/jira/browse/HIVE-9729
d8298e1c85a515150562b0df68af89c18c468638 llap HIVE-9418 https://issues.apache.org/jira/browse/HIVE-9418
034280ce070d812f1eb312567a974a8720943647 master HIVE-12272 https://issues.apache.org/jira/browse/HIVE-12272
+36e855084da833915dfe6c34f74e19352b64fde9 master HIVE-12826 https://issues.apache.org/jira/browse/HIVE-12826
+9cab4414caf1bba2eb1852536a9d3676ba7eab21 master HIVE-12827 https://issues.apache.org/jira/browse/HIVE-12827
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatAuthUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatAuthUtil.java
index 6dce9c4b1d1218ce1f8b4ebffee0eaacb31d25cd..ff10b05128f5b4ee8bf814f82a645c74ed57b376 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatAuthUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatAuthUtil.java
@@ -20,17 +20,32 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
final class HCatAuthUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(HCatAuthUtil.class);
+
public static boolean isAuthorizationEnabled(Configuration conf) {
- // the session state getAuthorizer can return null even if authorization is
- // enabled if the V2 api of authorizer in use.
+ if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+ return false;
+ }
+ // If the V2 API of the authorizer is in use, the session state getAuthorizer() returns null.
+ // Here we disable authorization if the V2 API or the DefaultHiveAuthorizationProvider is in use.
// The additional authorization checks happening in hcatalog are designed to
// work with storage based authorization (on client side). It should not try doing
- // additional checks if a V2 authorizer is in use. The reccomended configuration is to
- // use storage based authorization in metastore server
- return HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)
- && SessionState.get().getAuthorizer() != null;
+ // additional checks if a V2 authorizer or DefaultHiveAuthorizationProvider is in use.
+ // The recommended configuration is to use storage based authorization in metastore server.
+ // However, if the user defines a custom V1 authorization provider, it will be honored.
+ if (SessionState.get().getAuthorizer() == null ||
+ DefaultHiveAuthorizationProvider.class.getName().equals(
+ HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER))) {
+ LOG.info("Metastore authorizer is skipped for V2 authorizer or"
+ + " DefaultHiveAuthorizationProvider");
+ return false;
+ }
+ return true;
}
}
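
For reference, a hedged sketch of how the reworked check behaves under the configurations discussed in the comments above. It mirrors the unit test further down in this patch and would have to live in the same package as HCatAuthUtil (the class is package-private); the class name is illustrative.

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
import org.apache.hadoop.hive.ql.session.SessionState;

public class HCatAuthUtilSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);

    // A custom V1 provider such as storage based authorization: HCat-side checks stay on.
    conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,
        StorageBasedAuthorizationProvider.class.getName());
    SessionState.start(conf);
    System.out.println(HCatAuthUtil.isAuthorizationEnabled(conf)); // true

    // The default V1 provider (or any V2 authorizer): HCat-side checks are skipped.
    conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,
        DefaultHiveAuthorizationProvider.class.getName());
    SessionState.start(conf);
    System.out.println(HCatAuthUtil.isAuthorizationEnabled(conf)); // false
  }
}
```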
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java
index 320ace4352c1212bab093d38635c4d70005bd473..a7c9f29ecc2dc7fdf99c401af0be7bff54d50efe 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java
@@ -62,6 +62,8 @@
   private final Map<String, ObjectInspector> dynamicObjectInspectors;
   private Map<String, OutputJobInfo> dynamicOutputJobInfo;
+ private String HIVE_DEFAULT_PARTITION_VALUE = null;
+
/**
* @param baseWriter RecordWriter to contain
* @param context current TaskAttemptContext
@@ -86,6 +88,7 @@ public DynamicPartitionFileRecordWriterContainer(
 this.dynamicContexts = new HashMap<String, org.apache.hadoop.mapred.TaskAttemptContext>();
 this.dynamicObjectInspectors = new HashMap<String, ObjectInspector>();
 this.dynamicOutputJobInfo = new HashMap<String, OutputJobInfo>();
+ this.HIVE_DEFAULT_PARTITION_VALUE = HiveConf.getVar(context.getConfiguration(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
}
@Override
@@ -136,7 +139,8 @@ protected LocalFileWriter getLocalFileWriter(HCatRecord value) throws IOExceptio
// be done before we delete cols.
 List<String> dynamicPartValues = new ArrayList<String>();
for (Integer colToAppend : dynamicPartCols) {
- dynamicPartValues.add(value.get(colToAppend).toString());
+ Object partitionValue = value.get(colToAppend);
+ dynamicPartValues.add(partitionValue == null? HIVE_DEFAULT_PARTITION_VALUE : partitionValue.toString());
}
String dynKey = dynamicPartValues.toString();
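
The change above maps a null dynamic-partition value to the configured default partition name (hive.exec.default.partition.name, whose stock value is __HIVE_DEFAULT_PARTITION__) instead of failing on toString(). A tiny self-contained sketch of that rule; the class name and sample row are illustrative.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DefaultPartitionValueSketch {
  public static void main(String[] args) {
    String defaultPartitionValue = "__HIVE_DEFAULT_PARTITION__";
    List<Object> row = Arrays.asList("row-1", 42, null); // last column is the partition column
    List<String> dynamicPartValues = new ArrayList<>();
    for (int colToAppend : new int[] { 2 }) {             // index of the dynamic partition column
      Object partitionValue = row.get(colToAppend);
      dynamicPartValues.add(
          partitionValue == null ? defaultPartitionValue : partitionValue.toString());
    }
    System.out.println(dynamicPartValues);                // [__HIVE_DEFAULT_PARTITION__]
  }
}
```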
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
index bc56d77c00bd5ac908c913948ad93f89c60d57ab..ef7aa48f104d4522cb4ea045ae20b28ae1a27349 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
@@ -130,6 +131,8 @@ public void configureInputJobProperties(TableDesc tableDesc,
jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesSb.toString());
jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, typeNamesSb.toString());
+ boolean isAcidTable = AcidUtils.isTablePropertyTransactional(tableProperties);
+ AcidUtils.setTransactionalTableScan(jobProperties, isAcidTable);
}
} catch (IOException e) {
throw new IllegalStateException("Failed to set output path", e);
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/TestHCatAuthUtil.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/TestHCatAuthUtil.java
index 830dcb8119c255127f64a128a1d7b66f041bfb3e..a190002d7a8088899443caa4a56995dd478e1484 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/TestHCatAuthUtil.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/TestHCatAuthUtil.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
@@ -49,12 +50,13 @@ public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreC
}
/**
- * Test with auth enabled and v1 auth
+ * Test with auth enabled and StorageBasedAuthorizationProvider
*/
@Test
public void authEnabledV1Auth() throws Exception {
HiveConf hcatConf = new HiveConf(this.getClass());
hcatConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
+ hcatConf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, StorageBasedAuthorizationProvider.class.getName());
SessionState.start(hcatConf);
assertTrue("hcat auth should be enabled", HCatAuthUtil.isAuthorizationEnabled(hcatConf));
}
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
index 8148faa8741952b0f92c33eaaa5ca908dfa47365..61efc1adc4e2c5d0a90c6f49a9f1235fc05011aa 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
@@ -26,7 +26,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Random;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -40,6 +39,7 @@
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.FetchTask;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -392,7 +392,7 @@ private Path createInputFile() throws IOException {
}
FetchTask task = new FetchTask();
task.setWork(work);
- task.initialize(conf, null, null);
+ task.initialize(conf, null, null, new CompilationOpContext());
task.fetch(temp);
for (String str : temp) {
results.add(str.replace("\t", ","));
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index ff2598f9a8e4d3e83292e5b85bf137aaea53a24d..bde78e4d49712d8fd3dc7c0db5e7ac4e6ec23ba1 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -55,10 +56,12 @@
import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.ql.CommandNeedRetryException;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.io.orc.FileDump;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
@@ -464,8 +467,9 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int
JobConf job = new JobConf();
job.set("mapred.input.dir", partitionPath.toString());
job.set("bucket_count", Integer.toString(buckets));
- job.set("columns", "id,msg");
- job.set("columns.types", "bigint:string");
+ job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
+ job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
+ job.set(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, "true");
job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
InputSplit[] splits = inf.getSplits(job, buckets);
Assert.assertEquals(buckets, splits.length);
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
index 339e9ef8a5517ee17e84ab63ea168a01c415db73..686767908acf9144ae8a587b26112c58f890eb59 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
@@ -27,13 +27,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.io.AcidUtils.Directory;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
@@ -128,8 +131,9 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) {
JobConf job = new JobConf();
job.set("mapred.input.dir", partitionLocation.toString());
job.set("bucket_count", Integer.toString(table.getSd().getNumBuckets()));
- job.set("columns", "id,msg");
- job.set("columns.types", "bigint:string");
+ job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
+ job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
+ job.set(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, "true");
job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
InputSplit[] splits = inputFormat.getSplits(job, 1);
assertEquals(1, splits.length);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java
index 24bebb821ebff6707620f363cac9ee5ce47eff50..9fbbf90d9896446bb86f39f88d493b77db41c828 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java
@@ -464,7 +464,7 @@ public void oneMondoTest() throws Exception {
Assert.assertEquals("{\"tableName\":\"tab0\",\"dbName\":\"db0\",\"owner\":\"me\"," +
"\"createTime\":0,\"lastAccessTime\":0,\"retention\":0," +
"\"partitionKeys\":[{\"name\":\"pcol1\",\"type\":\"string\",\"comment\":\"\"}," +
- "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{}," +
+ "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}," +
"\"tableType\":\"\"} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " +
"col1: {\"colName\":\"col1\",\"colType\":\"int\"," +
"\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," +
@@ -516,7 +516,7 @@ public void oneMondoTest() throws Exception {
out = new PrintStream(outStr);
tool.go(false, HBaseReadWrite.PART_TABLE, "db0.tab1.c.d", null, conf, out, err);
Assert.assertEquals("{\"values\":[\"c\",\"d\"],\"dbName\":\"db0\",\"tableName\":\"tab1\"," +
- "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " +
+ "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " +
"stats: column col1: {\"colName\":\"col1\",\"colType\":\"int\"," +
"\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," +
"\"numDVs\":2}}} column col2: {\"colName\":\"col2\",\"colType\":\"varchar(32)\"," +
@@ -530,7 +530,7 @@ public void oneMondoTest() throws Exception {
"\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " +
"stats:" + lsep +
"{\"values\":[\"c\",\"d\"],\"dbName\":\"db0\",\"tableName\":\"tab1\",\"createTime\":0," +
- "\"lastAccessTime\":0,\"parameters\":{}} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " +
+ "\"lastAccessTime\":0,\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " +
"col1: {\"colName\":\"col1\",\"colType\":\"int\"," +
"\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," +
"\"numDVs\":2}}} column col2: {\"colName\":\"col2\",\"colType\":\"varchar(32)\"," +
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
index c62190421cb979790d8c1df5c952292c39fd2ab3..2cc1373108668b26e7e2a64cd5fff893159528bd 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
@@ -1667,7 +1667,12 @@ public void partitionStatistics() throws Exception {
Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols,
emptyParameters, null, null, null);
store.createTable(table);
-
+ for (String partVal : partVals) {
+ Partition part = new Partition(Arrays.asList(partVal), dbname, tableName, (int) now / 1000,
+ (int) now / 1000, sd, emptyParameters);
+ store.addPartition(part);
+ }
+
for (int i = 0; i < partNames.length; i++) {
ColumnStatistics stats = new ColumnStatistics();
ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
index 4529ce396e209d7a8e7648b2213bcab12cb28e50..3596e3015950b101699e4eba38d8d262d1726e3b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
@@ -90,6 +90,8 @@ protected void setUp() throws Exception {
AuthorizationPreEventListener.class.getName());
System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
getAuthorizationProvider());
+ System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER.varname,
+ getAuthorizationProvider());
setupMetaStoreReadAuthorization();
System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
InjectableDummyAuthenticator.class.getName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index da367ca363ab11cb371236beab71cc2d6c43525b..071a17e2a0b71271d772d243c1fc4ce52026db60 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -36,6 +37,7 @@
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
@@ -44,6 +46,7 @@
import org.apache.hadoop.hive.ql.io.AcidInputFormat;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
+import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
@@ -790,63 +793,6 @@ public void minorCompactAfterAbort() throws Exception {
}
}
- /**
- * HIVE-12352 has details
- * @throws Exception
- */
- @Test
- public void writeBetweenWorkerAndCleaner() throws Exception {
- String tblName = "HIVE12352";
- executeStatementOnDriver("drop table if exists " + tblName, driver);
- executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
- " CLUSTERED BY(a) INTO 1 BUCKETS" + //currently ACID requires table to be bucketed
- " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
-
- //create some data
- executeStatementOnDriver("insert into " + tblName + " values(1, 'foo'),(2, 'bar'),(3, 'baz')", driver);
- executeStatementOnDriver("update " + tblName + " set b = 'blah' where a = 3", driver);
-
- //run Worker to execute compaction
- CompactionTxnHandler txnHandler = new CompactionTxnHandler(conf);
- txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
- Worker t = new Worker();
- t.setThreadId((int) t.getId());
- t.setHiveConf(conf);
- AtomicBoolean stop = new AtomicBoolean(true);
- AtomicBoolean looped = new AtomicBoolean();
- t.init(stop, looped);
- t.run();
-
- //delete something, but make sure txn is rolled back
- conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
- executeStatementOnDriver("delete from " + tblName + " where a = 1", driver);
- conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
-
- List expected = new ArrayList<>();
- expected.add("1\tfoo");
- expected.add("2\tbar");
- expected.add("3\tblah");
- Assert.assertEquals("", expected,
- execSelectAndDumpData("select a,b from " + tblName + " order by a", driver, "writeBetweenWorkerAndCleaner()"));
-
- //run Cleaner
- Cleaner c = new Cleaner();
- c.setThreadId((int)c.getId());
- c.setHiveConf(conf);
- c.init(stop, new AtomicBoolean());
- c.run();
-
- //this seems odd, but we wan to make sure that to run CompactionTxnHandler.cleanEmptyAbortedTxns()
- Initiator i = new Initiator();
- i.setThreadId((int)i.getId());
- i.setHiveConf(conf);
- i.init(stop, new AtomicBoolean());
- i.run();
-
- //check that aborted operation didn't become committed
- Assert.assertEquals("", expected,
- execSelectAndDumpData("select a,b from " + tblName + " order by a", driver, "writeBetweenWorkerAndCleaner()"));
- }
@Test
public void majorCompactAfterAbort() throws Exception {
String dbName = "default";
@@ -967,8 +913,9 @@ public long getHighWatermark() {
OrcInputFormat aif = new OrcInputFormat();
Configuration conf = new Configuration();
- conf.set("columns", columnNamesProperty);
- conf.set("columns.types", columnTypesProperty);
+ conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty);
+ conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty);
+ HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
AcidInputFormat.RawReader reader =
aif.getRawReader(conf, false, bucket, txnList, base, deltas);
RecordIdentifier identifier = reader.createKey();
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 09905ea1fe98ea5aaf12b902081a41b53ed9ad4e..4eaff10fb9bf2f13ea5ce242d4a2689c765e6634 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -132,6 +132,7 @@ public static void setUpBeforeClass() throws SQLException, ClassNotFoundExceptio
Connection con1 = getConnection("default");
System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
System.setProperty(ConfVars.HIVEMAPREDMODE.varname, "nonstrict");
+ System.setProperty(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider");
Statement stmt1 = con1.createStatement();
assertNotNull("Statement is null", stmt1);
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index a2ccfe0e86512021c70e79e04242c16325435e0f..8318c3a5c51e329d427815fcb60ce40c3aab5773 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -69,6 +69,7 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
smb_mapjoin_8.q
minitez.query.files.shared=acid_globallimit.q,\
+ empty_join.q,\
alter_merge_2_orc.q,\
alter_merge_orc.q,\
alter_merge_stats_orc.q,\
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index fa16020361e05059e4d81dbb0c52e3ade1c9cdc6..54ac85a03b9f4c8343a70926cbcfa6fc2779c4b1 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -726,6 +726,8 @@ public void clearTablesCreatedDuringTests() throws Exception {
return;
}
+ db.getConf().set("hive.metastore.filter.hook",
+ "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
// Delete any tables other than the source tables
// and any databases other than the default database.
for (String dbName : db.getAllDatabases()) {
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstanceSet.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstanceSet.java
index 388b5f3ab8154943ae8a6a5d387b6e3766480585..be811eb040b9ddca4eff582404058ee9e7e1b8be 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstanceSet.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstanceSet.java
@@ -14,6 +14,7 @@
package org.apache.hadoop.hive.llap.registry;
import java.io.IOException;
+import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -30,6 +31,13 @@
   public Map<String, ServiceInstance> getAll();
/**
+ * Gets a list containing all the instances. This list has the same iteration order across
+ * different processes, assuming the list of registry entries is the same.
+ * @return
+ */
+ public List<ServiceInstance> getAllInstancesOrdered();
+
+ /**
* Get an instance by worker identity.
*
* @param name
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
index ef9de320c5c112bef6c5293e8caea980ade38eb1..92044bbf08bbe0c9e89e6c3c79dcb4a150a5628b 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
@@ -17,8 +17,13 @@
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -176,7 +181,8 @@ public String toString() {
private final class FixedServiceInstanceSet implements ServiceInstanceSet {
-    private final Map<String, ServiceInstance> instances = new HashMap<String, ServiceInstance>();
+    // LinkedHashMap has a repeatable iteration order.
+    private final Map<String, ServiceInstance> instances = new LinkedHashMap<>();
public FixedServiceInstanceSet() {
for (String host : hosts) {
@@ -191,6 +197,19 @@ public FixedServiceInstanceSet() {
}
@Override
+    public List<ServiceInstance> getAllInstancesOrdered() {
+      List<ServiceInstance> list = new LinkedList<>();
+      list.addAll(instances.values());
+      Collections.sort(list, new Comparator<ServiceInstance>() {
+        @Override
+        public int compare(ServiceInstance o1, ServiceInstance o2) {
+          return o1.getWorkerIdentity().compareTo(o2.getWorkerIdentity());
+        }
+      });
+      return list;
+    }
+
+ @Override
public ServiceInstance getInstance(String name) {
return instances.get(name);
}
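
The new getAllInstancesOrdered() methods sort instances by worker identity so that every process that sees the same registry entries derives the same order, which the consistent-split scheduling added in this patch relies on. Below is a self-contained sketch of that contract; WorkerStub is an illustrative stand-in, not the Hive ServiceInstance interface.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class OrderedInstancesSketch {
  static final class WorkerStub {
    final String workerIdentity;
    WorkerStub(String workerIdentity) { this.workerIdentity = workerIdentity; }
    @Override public String toString() { return workerIdentity; }
  }

  public static void main(String[] args) {
    List<WorkerStub> instances = new ArrayList<>();
    instances.add(new WorkerStub("worker-0003"));
    instances.add(new WorkerStub("worker-0001"));
    instances.add(new WorkerStub("worker-0002"));

    // Compare o1 against o2 so the sort actually orders the list deterministically.
    instances.sort(new Comparator<WorkerStub>() {
      @Override
      public int compare(WorkerStub o1, WorkerStub o2) {
        return o1.workerIdentity.compareTo(o2.workerIdentity);
      }
    });
    System.out.println(instances); // [worker-0001, worker-0002, worker-0003]
  }
}
```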
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
index 740f3734d179d206b6f73407e0bea702f7464113..907faed516b9335ee62e5b407cbaff0182405f09 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
@@ -14,11 +14,13 @@
package org.apache.hadoop.hive.llap.registry.impl;
import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.configuration.LlapConfiguration;
import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
import org.apache.hadoop.hive.llap.registry.ServiceRegistry;
import org.apache.hadoop.service.AbstractService;
@@ -32,11 +34,45 @@
private ServiceRegistry registry = null;
private final boolean isDaemon;
+ private static final Map<String, LlapRegistryService> yarnRegistries = new HashMap<>();
+
public LlapRegistryService(boolean isDaemon) {
super("LlapRegistryService");
this.isDaemon = isDaemon;
}
+ /**
+ * Helper method to get a ServiceRegistry instance to read from the registry.
+ * This should not be used by LLAP daemons.
+ *
+ * @param conf {@link Configuration} instance which contains service registry information.
+ * @return
+ */
+ public static synchronized LlapRegistryService getClient(Configuration conf) {
+ String hosts = HiveConf.getTrimmedVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
+ Preconditions.checkNotNull(hosts, ConfVars.LLAP_DAEMON_SERVICE_HOSTS.toString() + " must be defined");
+ LlapRegistryService registry;
+ if (hosts.startsWith("@")) {
+ // Caching instances only in case of the YARN registry. Each host-based list will get its own copy.
+ String name = hosts.substring(1);
+ if (yarnRegistries.containsKey(name)) {
+ registry = yarnRegistries.get(name);
+ } else {
+ registry = new LlapRegistryService(false);
+ registry.init(conf);
+ registry.start();
+ yarnRegistries.put(name, registry);
+ }
+ } else {
+ registry = new LlapRegistryService(false);
+ registry.init(conf);
+ registry.start();
+ }
+ LOG.info("Using LLAP registry (client) type: " + registry);
+ return registry;
+ }
+
+
@Override
public void serviceInit(Configuration conf) {
String hosts = HiveConf.getTrimmedVar(conf, ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
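
The new getClient() helper caches one client per YARN-registry application name (values of hive.llap.daemon.service.hosts starting with "@") and builds a fresh, started LlapRegistryService for plain host lists. A hedged usage sketch, assuming the patched llap-client classes and a reachable registry; the application name "@llap0" and class name are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;

public class LlapRegistryClientSketch {
  public static void main(String[] args) {
    Configuration conf = new HiveConf();
    // Either "@<app-name>" for the YARN registry or a comma-separated host list.
    HiveConf.setVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "@llap0");

    LlapRegistryService client = LlapRegistryService.getClient(conf);
    LlapRegistryService again = LlapRegistryService.getClient(conf);
    // YARN-registry clients are cached per name, so the same instance is returned.
    System.out.println(client == again); // true
  }
}
```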
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapYarnRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapYarnRegistryImpl.java
index fc2ebf2347845325da2042dda05cfe6c1d2f983c..efe31ccc23e7bdfe3638f6d4d7339f1281ad0126 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapYarnRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapYarnRegistryImpl.java
@@ -20,15 +20,20 @@
import java.net.URISyntaxException;
import java.net.URL;
import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
@@ -269,16 +274,47 @@ public int getManagementPort() {
// LinkedHashMap to retain iteration order.
 private final Map<String, ServiceInstance> instances = new LinkedHashMap<>();
+ private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+ private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();
+ private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock();
@Override
- public synchronized Map<String, ServiceInstance> getAll() {
+ public Map<String, ServiceInstance> getAll() {
// Return a copy. Instances may be modified during a refresh.
- return new LinkedHashMap<>(instances);
+ readLock.lock();
+ try {
+ return new LinkedHashMap<>(instances);
+ } finally {
+ readLock.unlock();
+ }
}
@Override
- public synchronized ServiceInstance getInstance(String name) {
- return instances.get(name);
+    public List<ServiceInstance> getAllInstancesOrdered() {
+      List<ServiceInstance> list = new LinkedList<>();
+      readLock.lock();
+      try {
+        list.addAll(instances.values());
+      } finally {
+        readLock.unlock();
+      }
+      Collections.sort(list, new Comparator<ServiceInstance>() {
+        @Override
+        public int compare(ServiceInstance o1, ServiceInstance o2) {
+          return o1.getWorkerIdentity().compareTo(o2.getWorkerIdentity());
+        }
+      });
+      return list;
+    }
+
+ @Override
+ public ServiceInstance getInstance(String name) {
+ readLock.lock();
+ try {
+ return instances.get(name);
+ } finally {
+ readLock.unlock();
+ }
}
@Override
@@ -290,7 +326,8 @@ public void refresh() throws IOException {
 Map<String, ServiceRecord> records =
RegistryUtils.listServiceRecords(client, RegistryPathUtils.parentOf(path));
// Synchronize after reading the service records from the external service (ZK)
- synchronized (this) {
+ writeLock.lock();
+ try {
 Set<String> latestKeys = new HashSet<String>();
LOG.info("Starting to refresh ServiceInstanceSet " + System.identityHashCode(this));
for (ServiceRecord rec : records.values()) {
@@ -333,28 +370,34 @@ public void refresh() throws IOException {
} else {
this.instances.putAll(freshInstances);
}
+ } finally {
+ writeLock.unlock();
}
}
@Override
-    public synchronized Set<ServiceInstance> getByHost(String host) {
+    public Set<ServiceInstance> getByHost(String host) {
       // TODO Maybe store this as a map which is populated during construction, to avoid walking
       // the map on each request.
+      readLock.lock();
       Set<ServiceInstance> byHost = new HashSet<ServiceInstance>();
-
- for (ServiceInstance i : instances.values()) {
- if (host.equals(i.getHost())) {
- // all hosts in instances should be alive in this impl
- byHost.add(i);
+ try {
+ for (ServiceInstance i : instances.values()) {
+ if (host.equals(i.getHost())) {
+ // all hosts in instances should be alive in this impl
+ byHost.add(i);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Locality comparing " + host + " to " + i.getHost());
+ }
}
if (LOG.isDebugEnabled()) {
- LOG.debug("Locality comparing " + host + " to " + i.getHost());
+ LOG.debug("Returning " + byHost.size() + " hosts for locality allocation on " + host);
}
+ return byHost;
+ } finally {
+ readLock.unlock();
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Returning " + byHost.size() + " hosts for locality allocation on " + host);
- }
- return byHost;
}
}
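
The synchronized methods above are replaced with a ReentrantReadWriteLock so that refresh(), which first does a slow external lookup against ZooKeeper, only blocks readers while it swaps the map, and readers hand out copies. A minimal, self-contained sketch of that copy-on-read pattern; the class and method names are illustrative, not Hive types.

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CopyOnReadRegistrySketch {
  private final Map<String, String> instances = new LinkedHashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  public Map<String, String> getAll() {
    lock.readLock().lock();
    try {
      return new LinkedHashMap<>(instances); // copy; callers never observe a concurrent refresh
    } finally {
      lock.readLock().unlock();
    }
  }

  public void refresh(Map<String, String> freshlyFetched) {
    // The expensive fetch happens outside the lock; only the swap is guarded.
    lock.writeLock().lock();
    try {
      instances.clear();
      instances.putAll(freshlyFetched);
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    CopyOnReadRegistrySketch reg = new CopyOnReadRegistrySketch();
    reg.refresh(java.util.Collections.singletonMap("worker-0001", "host-a"));
    System.out.println(reg.getAll()); // {worker-0001=host-a}
  }
}
```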
diff --git a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index d2180e55e8008b22d74efe6d155a16271e357f72..4ab7b32fc3fca1ae3b2ae8f66be5848800e5c353 100644
--- a/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ b/llap-server/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -3245,6 +3245,16 @@ public Builder clearMergedInputDescriptor() {
com.google.protobuf.ByteString
getDagNameBytes();
+ // optional int32 dag_id = 11;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ boolean hasDagId();
+ /**
+ * optional int32 dag_id = 11;
+ */
+ int getDagId();
+
// optional string vertex_name = 3;
/**
* optional string vertex_name = 3;
@@ -3441,13 +3451,13 @@ private FragmentSpecProto(
break;
}
case 26: {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = input.readBytes();
break;
}
case 34: {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = processorDescriptor_.toBuilder();
}
processorDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
@@ -3455,48 +3465,53 @@ private FragmentSpecProto(
subBuilder.mergeFrom(processorDescriptor_);
processorDescriptor_ = subBuilder.buildPartial();
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
break;
}
case 42: {
- if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = new java.util.ArrayList();
- mutable_bitField0_ |= 0x00000010;
+ mutable_bitField0_ |= 0x00000020;
}
inputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
break;
}
case 50: {
- if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = new java.util.ArrayList();
- mutable_bitField0_ |= 0x00000020;
+ mutable_bitField0_ |= 0x00000040;
}
outputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
break;
}
case 58: {
- if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = new java.util.ArrayList();
- mutable_bitField0_ |= 0x00000040;
+ mutable_bitField0_ |= 0x00000080;
}
groupedInputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry));
break;
}
case 64: {
- bitField0_ |= 0x00000010;
+ bitField0_ |= 0x00000020;
vertexParallelism_ = input.readInt32();
break;
}
case 72: {
- bitField0_ |= 0x00000020;
+ bitField0_ |= 0x00000040;
fragmentNumber_ = input.readInt32();
break;
}
case 80: {
- bitField0_ |= 0x00000040;
+ bitField0_ |= 0x00000080;
attemptNumber_ = input.readInt32();
break;
}
+ case 88: {
+ bitField0_ |= 0x00000004;
+ dagId_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3505,13 +3520,13 @@ private FragmentSpecProto(
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
- if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
}
- if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
}
- if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
}
this.unknownFields = unknownFields.build();
@@ -3632,6 +3647,22 @@ public boolean hasDagName() {
}
}
+ // optional int32 dag_id = 11;
+ public static final int DAG_ID_FIELD_NUMBER = 11;
+ private int dagId_;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public boolean hasDagId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public int getDagId() {
+ return dagId_;
+ }
+
// optional string vertex_name = 3;
public static final int VERTEX_NAME_FIELD_NUMBER = 3;
private java.lang.Object vertexName_;
@@ -3639,7 +3670,7 @@ public boolean hasDagName() {
* optional string vertex_name = 3;
*/
public boolean hasVertexName() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string vertex_name = 3;
@@ -3682,7 +3713,7 @@ public boolean hasVertexName() {
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public boolean hasProcessorDescriptor() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
+ return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
@@ -3812,7 +3843,7 @@ public int getGroupedInputSpecsCount() {
* optional int32 vertex_parallelism = 8;
*/
public boolean hasVertexParallelism() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
+ return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional int32 vertex_parallelism = 8;
@@ -3828,7 +3859,7 @@ public int getVertexParallelism() {
* optional int32 fragment_number = 9;
*/
public boolean hasFragmentNumber() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
+ return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional int32 fragment_number = 9;
@@ -3844,7 +3875,7 @@ public int getFragmentNumber() {
* optional int32 attempt_number = 10;
*/
public boolean hasAttemptNumber() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
+ return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional int32 attempt_number = 10;
@@ -3856,6 +3887,7 @@ public int getAttemptNumber() {
private void initFields() {
fragmentIdentifierString_ = "";
dagName_ = "";
+ dagId_ = 0;
vertexName_ = "";
processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
inputSpecs_ = java.util.Collections.emptyList();
@@ -3883,10 +3915,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getDagNameBytes());
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(3, getVertexNameBytes());
}
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(4, processorDescriptor_);
}
for (int i = 0; i < inputSpecs_.size(); i++) {
@@ -3898,15 +3930,18 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
for (int i = 0; i < groupedInputSpecs_.size(); i++) {
output.writeMessage(7, groupedInputSpecs_.get(i));
}
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeInt32(8, vertexParallelism_);
}
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeInt32(9, fragmentNumber_);
}
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeInt32(10, attemptNumber_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(11, dagId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -3924,11 +3959,11 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getDagNameBytes());
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getVertexNameBytes());
}
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, processorDescriptor_);
}
@@ -3944,18 +3979,22 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, groupedInputSpecs_.get(i));
}
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(8, vertexParallelism_);
}
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(9, fragmentNumber_);
}
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(10, attemptNumber_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(11, dagId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -3989,6 +4028,11 @@ public boolean equals(final java.lang.Object obj) {
result = result && getDagName()
.equals(other.getDagName());
}
+ result = result && (hasDagId() == other.hasDagId());
+ if (hasDagId()) {
+ result = result && (getDagId()
+ == other.getDagId());
+ }
result = result && (hasVertexName() == other.hasVertexName());
if (hasVertexName()) {
result = result && getVertexName()
@@ -4041,6 +4085,10 @@ public int hashCode() {
hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
hash = (53 * hash) + getDagName().hashCode();
}
+ if (hasDagId()) {
+ hash = (37 * hash) + DAG_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getDagId();
+ }
if (hasVertexName()) {
hash = (37 * hash) + VERTEX_NAME_FIELD_NUMBER;
hash = (53 * hash) + getVertexName().hashCode();
@@ -4190,38 +4238,40 @@ public Builder clear() {
bitField0_ = (bitField0_ & ~0x00000001);
dagName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
- vertexName_ = "";
+ dagId_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
+ vertexName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
if (processorDescriptorBuilder_ == null) {
processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
} else {
processorDescriptorBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000008);
+ bitField0_ = (bitField0_ & ~0x00000010);
if (inputSpecsBuilder_ == null) {
inputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
} else {
inputSpecsBuilder_.clear();
}
if (outputSpecsBuilder_ == null) {
outputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
} else {
outputSpecsBuilder_.clear();
}
if (groupedInputSpecsBuilder_ == null) {
groupedInputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
} else {
groupedInputSpecsBuilder_.clear();
}
vertexParallelism_ = 0;
- bitField0_ = (bitField0_ & ~0x00000080);
- fragmentNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000100);
- attemptNumber_ = 0;
+ fragmentNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
+ attemptNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
@@ -4261,53 +4311,57 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
- result.vertexName_ = vertexName_;
+ result.dagId_ = dagId_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
+ result.vertexName_ = vertexName_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
if (processorDescriptorBuilder_ == null) {
result.processorDescriptor_ = processorDescriptor_;
} else {
result.processorDescriptor_ = processorDescriptorBuilder_.build();
}
if (inputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
}
result.inputSpecs_ = inputSpecs_;
} else {
result.inputSpecs_ = inputSpecsBuilder_.build();
}
if (outputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
}
result.outputSpecs_ = outputSpecs_;
} else {
result.outputSpecs_ = outputSpecsBuilder_.build();
}
if (groupedInputSpecsBuilder_ == null) {
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
}
result.groupedInputSpecs_ = groupedInputSpecs_;
} else {
result.groupedInputSpecs_ = groupedInputSpecsBuilder_.build();
}
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
- to_bitField0_ |= 0x00000010;
- }
- result.vertexParallelism_ = vertexParallelism_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000020;
}
- result.fragmentNumber_ = fragmentNumber_;
+ result.vertexParallelism_ = vertexParallelism_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000040;
}
+ result.fragmentNumber_ = fragmentNumber_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000080;
+ }
result.attemptNumber_ = attemptNumber_;
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -4335,8 +4389,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
dagName_ = other.dagName_;
onChanged();
}
+ if (other.hasDagId()) {
+ setDagId(other.getDagId());
+ }
if (other.hasVertexName()) {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = other.vertexName_;
onChanged();
}
@@ -4347,7 +4404,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.inputSpecs_.isEmpty()) {
if (inputSpecs_.isEmpty()) {
inputSpecs_ = other.inputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureInputSpecsIsMutable();
inputSpecs_.addAll(other.inputSpecs_);
@@ -4360,7 +4417,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
inputSpecsBuilder_.dispose();
inputSpecsBuilder_ = null;
inputSpecs_ = other.inputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
inputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getInputSpecsFieldBuilder() : null;
@@ -4373,7 +4430,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.outputSpecs_.isEmpty()) {
if (outputSpecs_.isEmpty()) {
outputSpecs_ = other.outputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureOutputSpecsIsMutable();
outputSpecs_.addAll(other.outputSpecs_);
@@ -4386,7 +4443,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
outputSpecsBuilder_.dispose();
outputSpecsBuilder_ = null;
outputSpecs_ = other.outputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
outputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getOutputSpecsFieldBuilder() : null;
@@ -4399,7 +4456,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
if (!other.groupedInputSpecs_.isEmpty()) {
if (groupedInputSpecs_.isEmpty()) {
groupedInputSpecs_ = other.groupedInputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
} else {
ensureGroupedInputSpecsIsMutable();
groupedInputSpecs_.addAll(other.groupedInputSpecs_);
@@ -4412,7 +4469,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
groupedInputSpecsBuilder_.dispose();
groupedInputSpecsBuilder_ = null;
groupedInputSpecs_ = other.groupedInputSpecs_;
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
groupedInputSpecsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getGroupedInputSpecsFieldBuilder() : null;
@@ -4605,13 +4662,46 @@ public Builder setDagNameBytes(
return this;
}
+ // optional int32 dag_id = 11;
+ private int dagId_ ;
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public boolean hasDagId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public int getDagId() {
+ return dagId_;
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public Builder setDagId(int value) {
+ bitField0_ |= 0x00000004;
+ dagId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 dag_id = 11;
+ */
+ public Builder clearDagId() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ dagId_ = 0;
+ onChanged();
+ return this;
+ }
+
// optional string vertex_name = 3;
private java.lang.Object vertexName_ = "";
/**
* optional string vertex_name = 3;
*/
public boolean hasVertexName() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string vertex_name = 3;
@@ -4651,7 +4741,7 @@ public Builder setVertexName(
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = value;
onChanged();
return this;
@@ -4660,7 +4750,7 @@ public Builder setVertexName(
* optional string vertex_name = 3;
*/
public Builder clearVertexName() {
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000008);
vertexName_ = getDefaultInstance().getVertexName();
onChanged();
return this;
@@ -4673,7 +4763,7 @@ public Builder setVertexNameBytes(
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000008;
vertexName_ = value;
onChanged();
return this;
@@ -4687,7 +4777,7 @@ public Builder setVertexNameBytes(
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public boolean hasProcessorDescriptor() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
+ return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
@@ -4712,7 +4802,7 @@ public Builder setProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.Lla
} else {
processorDescriptorBuilder_.setMessage(value);
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4726,7 +4816,7 @@ public Builder setProcessorDescriptor(
} else {
processorDescriptorBuilder_.setMessage(builderForValue.build());
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4734,7 +4824,7 @@ public Builder setProcessorDescriptor(
*/
public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
if (processorDescriptorBuilder_ == null) {
- if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ if (((bitField0_ & 0x00000010) == 0x00000010) &&
processorDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
processorDescriptor_ =
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(processorDescriptor_).mergeFrom(value).buildPartial();
@@ -4745,7 +4835,7 @@ public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.L
} else {
processorDescriptorBuilder_.mergeFrom(value);
}
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
return this;
}
/**
@@ -4758,14 +4848,14 @@ public Builder clearProcessorDescriptor() {
} else {
processorDescriptorBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000008);
+ bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .EntityDescriptorProto processor_descriptor = 4;
*/
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getProcessorDescriptorBuilder() {
- bitField0_ |= 0x00000008;
+ bitField0_ |= 0x00000010;
onChanged();
return getProcessorDescriptorFieldBuilder().getBuilder();
}
@@ -4800,9 +4890,9 @@ public Builder clearProcessorDescriptor() {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_ =
java.util.Collections.emptyList();
private void ensureInputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ if (!((bitField0_ & 0x00000020) == 0x00000020)) {
inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(inputSpecs_);
- bitField0_ |= 0x00000010;
+ bitField0_ |= 0x00000020;
}
}
@@ -4951,7 +5041,7 @@ public Builder addAllInputSpecs(
public Builder clearInputSpecs() {
if (inputSpecsBuilder_ == null) {
inputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000010);
+ bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
inputSpecsBuilder_.clear();
@@ -5028,7 +5118,7 @@ public Builder removeInputSpecs(int index) {
inputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
inputSpecs_,
- ((bitField0_ & 0x00000010) == 0x00000010),
+ ((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
inputSpecs_ = null;
@@ -5040,9 +5130,9 @@ public Builder removeInputSpecs(int index) {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> outputSpecs_ =
java.util.Collections.emptyList();
private void ensureOutputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000020) == 0x00000020)) {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(outputSpecs_);
- bitField0_ |= 0x00000020;
+ bitField0_ |= 0x00000040;
}
}
@@ -5191,7 +5281,7 @@ public Builder addAllOutputSpecs(
public Builder clearOutputSpecs() {
if (outputSpecsBuilder_ == null) {
outputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000020);
+ bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
} else {
outputSpecsBuilder_.clear();
@@ -5268,7 +5358,7 @@ public Builder removeOutputSpecs(int index) {
outputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>(
outputSpecs_,
- ((bitField0_ & 0x00000020) == 0x00000020),
+ ((bitField0_ & 0x00000040) == 0x00000040),
getParentForChildren(),
isClean());
outputSpecs_ = null;
@@ -5280,9 +5370,9 @@ public Builder removeOutputSpecs(int index) {
private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> groupedInputSpecs_ =
java.util.Collections.emptyList();
private void ensureGroupedInputSpecsIsMutable() {
- if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ if (!((bitField0_ & 0x00000080) == 0x00000080)) {
groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>(groupedInputSpecs_);
- bitField0_ |= 0x00000040;
+ bitField0_ |= 0x00000080;
}
}
@@ -5431,7 +5521,7 @@ public Builder addAllGroupedInputSpecs(
public Builder clearGroupedInputSpecs() {
if (groupedInputSpecsBuilder_ == null) {
groupedInputSpecs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000040);
+ bitField0_ = (bitField0_ & ~0x00000080);
onChanged();
} else {
groupedInputSpecsBuilder_.clear();
@@ -5508,7 +5598,7 @@ public Builder removeGroupedInputSpecs(int index) {
groupedInputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>(
groupedInputSpecs_,
- ((bitField0_ & 0x00000040) == 0x00000040),
+ ((bitField0_ & 0x00000080) == 0x00000080),
getParentForChildren(),
isClean());
groupedInputSpecs_ = null;
@@ -5522,7 +5612,7 @@ public Builder removeGroupedInputSpecs(int index) {
* optional int32 vertex_parallelism = 8;
*/
public boolean hasVertexParallelism() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
+ return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional int32 vertex_parallelism = 8;
@@ -5534,7 +5624,7 @@ public int getVertexParallelism() {
* optional int32 vertex_parallelism = 8;
*/
public Builder setVertexParallelism(int value) {
- bitField0_ |= 0x00000080;
+ bitField0_ |= 0x00000100;
vertexParallelism_ = value;
onChanged();
return this;
@@ -5543,7 +5633,7 @@ public Builder setVertexParallelism(int value) {
* optional int32 vertex_parallelism = 8;
*/
public Builder clearVertexParallelism() {
- bitField0_ = (bitField0_ & ~0x00000080);
+ bitField0_ = (bitField0_ & ~0x00000100);
vertexParallelism_ = 0;
onChanged();
return this;
@@ -5555,7 +5645,7 @@ public Builder clearVertexParallelism() {
* optional int32 fragment_number = 9;
*/
public boolean hasFragmentNumber() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
+ return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional int32 fragment_number = 9;
@@ -5567,7 +5657,7 @@ public int getFragmentNumber() {
* optional int32 fragment_number = 9;
*/
public Builder setFragmentNumber(int value) {
- bitField0_ |= 0x00000100;
+ bitField0_ |= 0x00000200;
fragmentNumber_ = value;
onChanged();
return this;
@@ -5576,7 +5666,7 @@ public Builder setFragmentNumber(int value) {
* optional int32 fragment_number = 9;
*/
public Builder clearFragmentNumber() {
- bitField0_ = (bitField0_ & ~0x00000100);
+ bitField0_ = (bitField0_ & ~0x00000200);
fragmentNumber_ = 0;
onChanged();
return this;
@@ -5588,7 +5678,7 @@ public Builder clearFragmentNumber() {
* optional int32 attempt_number = 10;
*/
public boolean hasAttemptNumber() {
- return ((bitField0_ & 0x00000200) == 0x00000200);
+ return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional int32 attempt_number = 10;
@@ -5600,7 +5690,7 @@ public int getAttemptNumber() {
* optional int32 attempt_number = 10;
*/
public Builder setAttemptNumber(int value) {
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
attemptNumber_ = value;
onChanged();
return this;
@@ -5609,7 +5699,7 @@ public Builder setAttemptNumber(int value) {
* optional int32 attempt_number = 10;
*/
public Builder clearAttemptNumber() {
- bitField0_ = (bitField0_ & ~0x00000200);
+ bitField0_ = (bitField0_ & ~0x00000400);
attemptNumber_ = 0;
onChanged();
return this;
@@ -6510,76 +6600,675 @@ public Builder clearCurrentAttemptStartTime() {
// @@protoc_insertion_point(class_scope:FragmentRuntimeInfo)
}
- public interface SubmitWorkRequestProtoOrBuilder
+ public interface QueryIdentifierProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string container_id_string = 1;
- /**
- * optional string container_id_string = 1;
- */
- boolean hasContainerIdString();
- /**
- * optional string container_id_string = 1;
- */
- java.lang.String getContainerIdString();
- /**
- * optional string container_id_string = 1;
- */
- com.google.protobuf.ByteString
- getContainerIdStringBytes();
-
- // optional string am_host = 2;
+ // optional string app_identifier = 1;
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
- boolean hasAmHost();
+ boolean hasAppIdentifier();
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
- java.lang.String getAmHost();
+ java.lang.String getAppIdentifier();
/**
- * optional string am_host = 2;
+ * optional string app_identifier = 1;
*/
com.google.protobuf.ByteString
- getAmHostBytes();
+ getAppIdentifierBytes();
- // optional int32 am_port = 3;
+ // optional int32 dag_identifier = 2;
/**
- * optional int32 am_port = 3;
+ * optional int32 dag_identifier = 2;
*/
- boolean hasAmPort();
+ boolean hasDagIdentifier();
/**
- * optional int32 am_port = 3;
+ * optional int32 dag_identifier = 2;
*/
- int getAmPort();
+ int getDagIdentifier();
+ }
+ /**
+ * Protobuf type {@code QueryIdentifierProto}
+ */
+ public static final class QueryIdentifierProto extends
+ com.google.protobuf.GeneratedMessage
+ implements QueryIdentifierProtoOrBuilder {
+ // Use QueryIdentifierProto.newBuilder() to construct.
+ private QueryIdentifierProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private QueryIdentifierProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- // optional string token_identifier = 4;
- /**
- * optional string token_identifier = 4;
- */
- boolean hasTokenIdentifier();
- /**
- * optional string token_identifier = 4;
- */
- java.lang.String getTokenIdentifier();
- /**
- * optional string token_identifier = 4;
- */
- com.google.protobuf.ByteString
- getTokenIdentifierBytes();
+ private static final QueryIdentifierProto defaultInstance;
+ public static QueryIdentifierProto getDefaultInstance() {
+ return defaultInstance;
+ }
- // optional bytes credentials_binary = 5;
- /**
- * optional bytes credentials_binary = 5;
- */
- boolean hasCredentialsBinary();
- /**
- * optional bytes credentials_binary = 5;
- */
- com.google.protobuf.ByteString getCredentialsBinary();
+ public QueryIdentifierProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
- // optional string user = 6;
- /**
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private QueryIdentifierProto(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ dagIdentifier_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<QueryIdentifierProto> PARSER =
+ new com.google.protobuf.AbstractParser<QueryIdentifierProto>() {
+ public QueryIdentifierProto parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new QueryIdentifierProto(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<QueryIdentifierProto> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string app_identifier = 1;
+ public static final int APP_IDENTIFIER_FIELD_NUMBER = 1;
+ private java.lang.Object appIdentifier_;
+ /**
+ * optional string app_identifier = 1;
+ */
+ public boolean hasAppIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public java.lang.String getAppIdentifier() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ appIdentifier_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAppIdentifierBytes() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ appIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 dag_identifier = 2;
+ public static final int DAG_IDENTIFIER_FIELD_NUMBER = 2;
+ private int dagIdentifier_;
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public boolean hasDagIdentifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public int getDagIdentifier() {
+ return dagIdentifier_;
+ }
+
+ private void initFields() {
+ appIdentifier_ = "";
+ dagIdentifier_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getAppIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, dagIdentifier_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getAppIdentifierBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, dagIdentifier_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) obj;
+
+ boolean result = true;
+ result = result && (hasAppIdentifier() == other.hasAppIdentifier());
+ if (hasAppIdentifier()) {
+ result = result && getAppIdentifier()
+ .equals(other.getAppIdentifier());
+ }
+ result = result && (hasDagIdentifier() == other.hasDagIdentifier());
+ if (hasDagIdentifier()) {
+ result = result && (getDagIdentifier()
+ == other.getDagIdentifier());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasAppIdentifier()) {
+ hash = (37 * hash) + APP_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getAppIdentifier().hashCode();
+ }
+ if (hasDagIdentifier()) {
+ hash = (37 * hash) + DAG_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getDagIdentifier();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code QueryIdentifierProto}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ appIdentifier_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dagIdentifier_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto build() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto buildPartial() {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.appIdentifier_ = appIdentifier_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.dagIdentifier_ = dagIdentifier_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) {
+ return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto other) {
+ if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) return this;
+ if (other.hasAppIdentifier()) {
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = other.appIdentifier_;
+ onChanged();
+ }
+ if (other.hasDagIdentifier()) {
+ setDagIdentifier(other.getDagIdentifier());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string app_identifier = 1;
+ private java.lang.Object appIdentifier_ = "";
+ /**
+ * optional string app_identifier = 1;
+ */
+ public boolean hasAppIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public java.lang.String getAppIdentifier() {
+ java.lang.Object ref = appIdentifier_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ appIdentifier_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAppIdentifierBytes() {
+ java.lang.Object ref = appIdentifier_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ appIdentifier_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder setAppIdentifier(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder clearAppIdentifier() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ appIdentifier_ = getDefaultInstance().getAppIdentifier();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string app_identifier = 1;
+ */
+ public Builder setAppIdentifierBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ appIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 dag_identifier = 2;
+ private int dagIdentifier_ ;
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public boolean hasDagIdentifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public int getDagIdentifier() {
+ return dagIdentifier_;
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public Builder setDagIdentifier(int value) {
+ bitField0_ |= 0x00000002;
+ dagIdentifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 dag_identifier = 2;
+ */
+ public Builder clearDagIdentifier() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ dagIdentifier_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:QueryIdentifierProto)
+ }
+
+ static {
+ defaultInstance = new QueryIdentifierProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:QueryIdentifierProto)
+ }
+
+ public interface SubmitWorkRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string container_id_string = 1;
+ /**
+ * optional string container_id_string = 1;
+ */
+ boolean hasContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ java.lang.String getContainerIdString();
+ /**
+ * optional string container_id_string = 1;
+ */
+ com.google.protobuf.ByteString
+ getContainerIdStringBytes();
+
+ // optional string am_host = 2;
+ /**
+ * optional string am_host = 2;
+ */
+ boolean hasAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ java.lang.String getAmHost();
+ /**
+ * optional string am_host = 2;
+ */
+ com.google.protobuf.ByteString
+ getAmHostBytes();
+
+ // optional int32 am_port = 3;
+ /**
+ * optional int32 am_port = 3;
+ */
+ boolean hasAmPort();
+ /**
+ * optional int32 am_port = 3;
+ */
+ int getAmPort();
+
+ // optional string token_identifier = 4;
+ /**
+ * optional string token_identifier = 4;
+ */
+ boolean hasTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ java.lang.String getTokenIdentifier();
+ /**
+ * optional string token_identifier = 4;
+ */
+ com.google.protobuf.ByteString
+ getTokenIdentifierBytes();
+
+ // optional bytes credentials_binary = 5;
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ boolean hasCredentialsBinary();
+ /**
+ * optional bytes credentials_binary = 5;
+ */
+ com.google.protobuf.ByteString getCredentialsBinary();
+
+ // optional string user = 6;
+ /**
* optional string user = 6;
*/
boolean hasUser();
@@ -8800,20 +9489,19 @@ public Builder clearSubmissionState() {
public interface SourceStateUpdatedRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string dag_name = 1;
+ // optional .QueryIdentifierProto query_identifier = 1;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
// optional string src_name = 2;
/**
@@ -8892,8 +9580,16 @@ private SourceStateUpdatedRequestProto(
break;
}
case 10: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000001;
- dagName_ = input.readBytes();
break;
}
case 18: {
@@ -8952,47 +9648,26 @@ public SourceStateUpdatedRequestProto parsePartialFrom(
}
private int bitField0_;
- // optional string dag_name = 1;
- public static final int DAG_NAME_FIELD_NUMBER = 1;
- private java.lang.Object dagName_;
+ // optional .QueryIdentifierProto query_identifier = 1;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
// optional string src_name = 2;
@@ -9055,7 +9730,7 @@ public boolean hasState() {
}
private void initFields() {
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
srcName_ = "";
state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED;
}
@@ -9072,7 +9747,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getDagNameBytes());
+ output.writeMessage(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getSrcNameBytes());
@@ -9091,7 +9766,7 @@ public int getSerializedSize() {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getDagNameBytes());
+ .computeMessageSize(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
@@ -9124,10 +9799,10 @@ public boolean equals(final java.lang.Object obj) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto) obj;
boolean result = true;
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasSrcName() == other.hasSrcName());
if (hasSrcName()) {
@@ -9152,9 +9827,9 @@ public int hashCode() {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasSrcName()) {
hash = (37 * hash) + SRC_NAME_FIELD_NUMBER;
@@ -9265,6 +9940,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -9273,7 +9949,11 @@ private static Builder create() {
public Builder clear() {
super.clear();
- dagName_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000001);
srcName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
@@ -9310,7 +9990,11 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.dagName_ = dagName_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
@@ -9335,10 +10019,8 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto other) {
if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance()) return this;
- if (other.hasDagName()) {
- bitField0_ |= 0x00000001;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasSrcName()) {
bitField0_ |= 0x00000002;
@@ -9375,78 +10057,121 @@ public Builder mergeFrom(
}
private int bitField0_;
- // optional string dag_name = 1;
- private java.lang.Object dagName_ = "";
+ // optional .QueryIdentifierProto query_identifier = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
+ bitField0_ |= 0x00000001;
+ return this;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- dagName_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder clearDagName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = getDefaultInstance().getDagName();
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000001;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
}
/**
- * optional string dag_name = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- dagName_ = value;
- onChanged();
- return this;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
// optional string src_name = 2;
@@ -9926,28 +10651,27 @@ public Builder mergeFrom(
com.google.protobuf.ByteString
getQueryIdBytes();
- // optional string dag_name = 2;
+ // optional .QueryIdentifierProto query_identifier = 2;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
- // optional int64 delete_delay = 3 [default = 0];
+ // optional int64 delete_delay = 4 [default = 0];
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
boolean hasDeleteDelay();
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
long getDeleteDelay();
}
@@ -10008,11 +10732,19 @@ private QueryCompleteRequestProto(
break;
}
case 18: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000002;
- dagName_ = input.readBytes();
break;
}
- case 24: {
+ case 32: {
bitField0_ |= 0x00000004;
deleteDelay_ = input.readInt64();
break;
@@ -10100,60 +10832,39 @@ public boolean hasQueryId() {
}
}
- // optional string dag_name = 2;
- public static final int DAG_NAME_FIELD_NUMBER = 2;
- private java.lang.Object dagName_;
+ // optional .QueryIdentifierProto query_identifier = 2;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
- // optional int64 delete_delay = 3 [default = 0];
- public static final int DELETE_DELAY_FIELD_NUMBER = 3;
+ // optional int64 delete_delay = 4 [default = 0];
+ public static final int DELETE_DELAY_FIELD_NUMBER = 4;
private long deleteDelay_;
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public boolean hasDeleteDelay() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public long getDeleteDelay() {
return deleteDelay_;
@@ -10161,7 +10872,7 @@ public long getDeleteDelay() {
private void initFields() {
queryId_ = "";
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
deleteDelay_ = 0L;
}
private byte memoizedIsInitialized = -1;
@@ -10180,10 +10891,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
output.writeBytes(1, getQueryIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getDagNameBytes());
+ output.writeMessage(2, queryIdentifier_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt64(3, deleteDelay_);
+ output.writeInt64(4, deleteDelay_);
}
getUnknownFields().writeTo(output);
}
@@ -10200,11 +10911,11 @@ public int getSerializedSize() {
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getDagNameBytes());
+ .computeMessageSize(2, queryIdentifier_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(3, deleteDelay_);
+ .computeInt64Size(4, deleteDelay_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -10234,10 +10945,10 @@ public boolean equals(final java.lang.Object obj) {
result = result && getQueryId()
.equals(other.getQueryId());
}
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasDeleteDelay() == other.hasDeleteDelay());
if (hasDeleteDelay()) {
@@ -10261,9 +10972,9 @@ public int hashCode() {
hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
hash = (53 * hash) + getQueryId().hashCode();
}
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasDeleteDelay()) {
hash = (37 * hash) + DELETE_DELAY_FIELD_NUMBER;
@@ -10370,6 +11081,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -10380,7 +11092,11 @@ public Builder clear() {
super.clear();
queryId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000002);
deleteDelay_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
@@ -10419,7 +11135,11 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- result.dagName_ = dagName_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
@@ -10445,10 +11165,8 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc
queryId_ = other.queryId_;
onChanged();
}
- if (other.hasDagName()) {
- bitField0_ |= 0x00000002;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasDeleteDelay()) {
setDeleteDelay(other.getDeleteDelay());
@@ -10554,96 +11272,139 @@ public Builder setQueryIdBytes(
return this;
}
- // optional string dag_name = 2;
- private java.lang.Object dagName_ = "";
+ // optional .QueryIdentifierProto query_identifier = 2;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public boolean hasDagName() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
+ bitField0_ |= 0x00000002;
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder clearDagName() {
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000002);
- dagName_ = getDefaultInstance().getDagName();
- onChanged();
return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 2;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000002;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
+ }
+ /**
+ * optional .QueryIdentifierProto query_identifier = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
- // optional int64 delete_delay = 3 [default = 0];
+ // optional int64 delete_delay = 4 [default = 0];
private long deleteDelay_ ;
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public boolean hasDeleteDelay() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public long getDeleteDelay() {
return deleteDelay_;
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public Builder setDeleteDelay(long value) {
bitField0_ |= 0x00000004;
@@ -10652,7 +11413,7 @@ public Builder setDeleteDelay(long value) {
return this;
}
/**
- * optional int64 delete_delay = 3 [default = 0];
+ * optional int64 delete_delay = 4 [default = 0];
*/
public Builder clearDeleteDelay() {
bitField0_ = (bitField0_ & ~0x00000004);
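To make the wire-format change above concrete, here is a minimal sketch of building the updated QueryCompleteRequestProto against the regenerated classes. The identifier values are invented for illustration; note that query_id stays at field 1, the new query_identifier message replaces dag_name at field 2, and delete_delay moves to field 4.

    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;

    public class QueryCompleteRequestExample {
      public static void main(String[] args) {
        // Hypothetical identifiers; real values come from the application/DAG context.
        QueryIdentifierProto queryIdentifier = QueryIdentifierProto.newBuilder()
            .setAppIdentifier("application_1452000000000_0001")
            .setDagIdentifier(3)
            .build();

        QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder()
            .setQueryId("hive_20160101000000_example")   // field 1, unchanged
            .setQueryIdentifier(queryIdentifier)          // field 2, replaces dag_name
            .setDeleteDelay(0L)                           // renumbered to field 4 on the wire
            .build();

        System.out.println(request.hasQueryIdentifier()); // true
      }
    }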
@@ -11013,47 +11774,31 @@ public Builder mergeFrom(
public interface TerminateFragmentRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // optional string query_id = 1;
- /**
- * optional string query_id = 1;
- */
- boolean hasQueryId();
- /**
- * optional string query_id = 1;
- */
- java.lang.String getQueryId();
- /**
- * optional string query_id = 1;
- */
- com.google.protobuf.ByteString
- getQueryIdBytes();
-
- // optional string dag_name = 2;
+ // optional .QueryIdentifierProto query_identifier = 1;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- boolean hasDagName();
+ boolean hasQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- java.lang.String getDagName();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier();
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- com.google.protobuf.ByteString
- getDagNameBytes();
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder();
- // optional string fragment_identifier_string = 7;
+ // optional string fragment_identifier_string = 2;
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
boolean hasFragmentIdentifierString();
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
java.lang.String getFragmentIdentifierString();
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
com.google.protobuf.ByteString
getFragmentIdentifierStringBytes();
@@ -11110,17 +11855,20 @@ private TerminateFragmentRequestProto(
break;
}
case 10: {
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = queryIdentifier_.toBuilder();
+ }
+ queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(queryIdentifier_);
+ queryIdentifier_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000001;
- queryId_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
- dagName_ = input.readBytes();
- break;
- }
- case 58: {
- bitField0_ |= 0x00000004;
fragmentIdentifierString_ = input.readBytes();
break;
}
@@ -11158,109 +11906,45 @@ public TerminateFragmentRequestProto parsePartialFrom(
}
};
- @java.lang.Override
- public com.google.protobuf.Parser getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // optional string query_id = 1;
- public static final int QUERY_ID_FIELD_NUMBER = 1;
- private java.lang.Object queryId_;
- /**
- * optional string query_id = 1;
- */
- public boolean hasQueryId() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * optional string query_id = 1;
- */
- public java.lang.String getQueryId() {
- java.lang.Object ref = queryId_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- queryId_ = s;
- }
- return s;
- }
- }
- /**
- * optional string query_id = 1;
- */
- public com.google.protobuf.ByteString
- getQueryIdBytes() {
- java.lang.Object ref = queryId_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- queryId_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
}
- // optional string dag_name = 2;
- public static final int DAG_NAME_FIELD_NUMBER = 2;
- private java.lang.Object dagName_;
+ private int bitField0_;
+ // optional .QueryIdentifierProto query_identifier = 1;
+ public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_;
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasDagName() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
+ public boolean hasQueryIdentifier() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- dagName_ = s;
- }
- return s;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ return queryIdentifier_;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ return queryIdentifier_;
}
- // optional string fragment_identifier_string = 7;
- public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 7;
+ // optional string fragment_identifier_string = 2;
+ public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 2;
private java.lang.Object fragmentIdentifierString_;
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public boolean hasFragmentIdentifierString() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public java.lang.String getFragmentIdentifierString() {
java.lang.Object ref = fragmentIdentifierString_;
@@ -11277,7 +11961,7 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public com.google.protobuf.ByteString
getFragmentIdentifierStringBytes() {
@@ -11294,8 +11978,7 @@ public boolean hasFragmentIdentifierString() {
}
private void initFields() {
- queryId_ = "";
- dagName_ = "";
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
fragmentIdentifierString_ = "";
}
private byte memoizedIsInitialized = -1;
@@ -11311,13 +11994,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getQueryIdBytes());
+ output.writeMessage(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getDagNameBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeBytes(7, getFragmentIdentifierStringBytes());
+ output.writeBytes(2, getFragmentIdentifierStringBytes());
}
getUnknownFields().writeTo(output);
}
@@ -11330,15 +12010,11 @@ public int getSerializedSize() {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getQueryIdBytes());
+ .computeMessageSize(1, queryIdentifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getDagNameBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(7, getFragmentIdentifierStringBytes());
+ .computeBytesSize(2, getFragmentIdentifierStringBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -11363,15 +12039,10 @@ public boolean equals(final java.lang.Object obj) {
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) obj;
boolean result = true;
- result = result && (hasQueryId() == other.hasQueryId());
- if (hasQueryId()) {
- result = result && getQueryId()
- .equals(other.getQueryId());
- }
- result = result && (hasDagName() == other.hasDagName());
- if (hasDagName()) {
- result = result && getDagName()
- .equals(other.getDagName());
+ result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
+ if (hasQueryIdentifier()) {
+ result = result && getQueryIdentifier()
+ .equals(other.getQueryIdentifier());
}
result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString());
if (hasFragmentIdentifierString()) {
@@ -11391,13 +12062,9 @@ public int hashCode() {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasQueryId()) {
- hash = (37 * hash) + QUERY_ID_FIELD_NUMBER;
- hash = (53 * hash) + getQueryId().hashCode();
- }
- if (hasDagName()) {
- hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getDagName().hashCode();
+ if (hasQueryIdentifier()) {
+ hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQueryIdentifier().hashCode();
}
if (hasFragmentIdentifierString()) {
hash = (37 * hash) + FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER;
@@ -11504,6 +12171,7 @@ private Builder(
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQueryIdentifierFieldBuilder();
}
}
private static Builder create() {
@@ -11512,12 +12180,14 @@ private static Builder create() {
public Builder clear() {
super.clear();
- queryId_ = "";
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ } else {
+ queryIdentifierBuilder_.clear();
+ }
bitField0_ = (bitField0_ & ~0x00000001);
- dagName_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
fragmentIdentifierString_ = "";
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@@ -11549,14 +12219,14 @@ public Builder clone() {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.queryId_ = queryId_;
+ if (queryIdentifierBuilder_ == null) {
+ result.queryIdentifier_ = queryIdentifier_;
+ } else {
+ result.queryIdentifier_ = queryIdentifierBuilder_.build();
+ }
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- result.dagName_ = dagName_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
result.fragmentIdentifierString_ = fragmentIdentifierString_;
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -11574,18 +12244,11 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other) {
if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance()) return this;
- if (other.hasQueryId()) {
- bitField0_ |= 0x00000001;
- queryId_ = other.queryId_;
- onChanged();
- }
- if (other.hasDagName()) {
- bitField0_ |= 0x00000002;
- dagName_ = other.dagName_;
- onChanged();
+ if (other.hasQueryIdentifier()) {
+ mergeQueryIdentifier(other.getQueryIdentifier());
}
if (other.hasFragmentIdentifierString()) {
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = other.fragmentIdentifierString_;
onChanged();
}
@@ -11616,164 +12279,133 @@ public Builder mergeFrom(
}
private int bitField0_;
- // optional string query_id = 1;
- private java.lang.Object queryId_ = "";
+ // optional .QueryIdentifierProto query_identifier = 1;
+ private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_;
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public boolean hasQueryId() {
+ public boolean hasQueryIdentifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getQueryId() {
- java.lang.Object ref = queryId_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- queryId_ = s;
- return s;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ return queryIdentifier_;
} else {
- return (java.lang.String) ref;
+ return queryIdentifierBuilder_.getMessage();
}
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getQueryIdBytes() {
- java.lang.Object ref = queryId_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- queryId_ = b;
- return b;
+ public Builder setQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ queryIdentifier_ = value;
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.setMessage(value);
}
- }
- /**
- * optional string query_id = 1;
- */
- public Builder setQueryId(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- queryId_ = value;
- onChanged();
- return this;
- }
- /**
- * optional string query_id = 1;
- */
- public Builder clearQueryId() {
- bitField0_ = (bitField0_ & ~0x00000001);
- queryId_ = getDefaultInstance().getQueryId();
- onChanged();
+ bitField0_ |= 0x00000001;
return this;
}
/**
- * optional string query_id = 1;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setQueryIdBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- queryId_ = value;
- onChanged();
+ public Builder setQueryIdentifier(
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder builderForValue) {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = builderForValue.build();
+ onChanged();
+ } else {
+ queryIdentifierBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
return this;
}
-
- // optional string dag_name = 2;
- private java.lang.Object dagName_ = "";
- /**
- * optional string dag_name = 2;
- */
- public boolean hasDagName() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public java.lang.String getDagName() {
- java.lang.Object ref = dagName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- dagName_ = s;
- return s;
+ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) {
+ if (queryIdentifierBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) {
+ queryIdentifier_ =
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial();
+ } else {
+ queryIdentifier_ = value;
+ }
+ onChanged();
} else {
- return (java.lang.String) ref;
+ queryIdentifierBuilder_.mergeFrom(value);
}
+ bitField0_ |= 0x00000001;
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public com.google.protobuf.ByteString
- getDagNameBytes() {
- java.lang.Object ref = dagName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- dagName_ = b;
- return b;
+ public Builder clearQueryIdentifier() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance();
+ onChanged();
} else {
- return (com.google.protobuf.ByteString) ref;
+ queryIdentifierBuilder_.clear();
}
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder getQueryIdentifierBuilder() {
+ bitField0_ |= 0x00000001;
onChanged();
- return this;
+ return getQueryIdentifierFieldBuilder().getBuilder();
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder clearDagName() {
- bitField0_ = (bitField0_ & ~0x00000002);
- dagName_ = getDefaultInstance().getDagName();
- onChanged();
- return this;
+ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() {
+ if (queryIdentifierBuilder_ != null) {
+ return queryIdentifierBuilder_.getMessageOrBuilder();
+ } else {
+ return queryIdentifier_;
+ }
}
/**
- * optional string dag_name = 2;
+ * optional .QueryIdentifierProto query_identifier = 1;
*/
- public Builder setDagNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- dagName_ = value;
- onChanged();
- return this;
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>
+ getQueryIdentifierFieldBuilder() {
+ if (queryIdentifierBuilder_ == null) {
+ queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>(
+ queryIdentifier_,
+ getParentForChildren(),
+ isClean());
+ queryIdentifier_ = null;
+ }
+ return queryIdentifierBuilder_;
}
- // optional string fragment_identifier_string = 7;
+ // optional string fragment_identifier_string = 2;
private java.lang.Object fragmentIdentifierString_ = "";
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public boolean hasFragmentIdentifierString() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public java.lang.String getFragmentIdentifierString() {
java.lang.Object ref = fragmentIdentifierString_;
@@ -11787,7 +12419,7 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public com.google.protobuf.ByteString
getFragmentIdentifierStringBytes() {
@@ -11803,36 +12435,36 @@ public boolean hasFragmentIdentifierString() {
}
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder setFragmentIdentifierString(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = value;
onChanged();
return this;
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder clearFragmentIdentifierString() {
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000002);
fragmentIdentifierString_ = getDefaultInstance().getFragmentIdentifierString();
onChanged();
return this;
}
/**
- * optional string fragment_identifier_string = 7;
+ * optional string fragment_identifier_string = 2;
*/
public Builder setFragmentIdentifierStringBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000002;
fragmentIdentifierString_ = value;
onChanged();
return this;
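A companion sketch for TerminateFragmentRequestProto under the same assumptions (regenerated classes only, made-up identifiers): the old query_id/dag_name string pair is gone entirely, query_identifier takes field 1, and fragment_identifier_string drops from field 7 to field 2. The round trip at the end simply confirms the renumbered layout parses.

    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;

    public class TerminateFragmentRequestExample {
      public static void main(String[] args) throws Exception {
        TerminateFragmentRequestProto request = TerminateFragmentRequestProto.newBuilder()
            .setQueryIdentifier(QueryIdentifierProto.newBuilder()
                .setAppIdentifier("application_1452000000000_0001") // hypothetical app id
                .setDagIdentifier(3))
            .setFragmentIdentifierString("attempt_1452000000000_0001_3_00_000000_0")
            .build();

        // Serialize and parse back to verify the new field layout round-trips cleanly.
        TerminateFragmentRequestProto parsed =
            TerminateFragmentRequestProto.parseFrom(request.toByteArray());
        System.out.println(parsed.getQueryIdentifier().getDagIdentifier()); // 3
      }
    }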
@@ -13670,6 +14302,11 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_FragmentRuntimeInfo_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_QueryIdentifierProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_QueryIdentifierProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_SubmitWorkRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -13739,58 +14376,62 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
"roupInputSpecProto\022\022\n\ngroup_name\030\001 \001(\t\022\026" +
"\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d",
"escriptor\030\003 \001(\0132\026.EntityDescriptorProto\"" +
- "\333\002\n\021FragmentSpecProto\022\"\n\032fragment_identi" +
- "fier_string\030\001 \001(\t\022\020\n\010dag_name\030\002 \001(\t\022\023\n\013v" +
- "ertex_name\030\003 \001(\t\0224\n\024processor_descriptor" +
- "\030\004 \001(\0132\026.EntityDescriptorProto\022!\n\013input_" +
- "specs\030\005 \003(\0132\014.IOSpecProto\022\"\n\014output_spec" +
- "s\030\006 \003(\0132\014.IOSpecProto\0221\n\023grouped_input_s" +
- "pecs\030\007 \003(\0132\024.GroupInputSpecProto\022\032\n\022vert" +
- "ex_parallelism\030\010 \001(\005\022\027\n\017fragment_number\030" +
- "\t \001(\005\022\026\n\016attempt_number\030\n \001(\005\"\344\001\n\023Fragme",
- "ntRuntimeInfo\022#\n\033num_self_and_upstream_t" +
- "asks\030\001 \001(\005\022-\n%num_self_and_upstream_comp" +
- "leted_tasks\030\002 \001(\005\022\033\n\023within_dag_priority" +
- "\030\003 \001(\005\022\026\n\016dag_start_time\030\004 \001(\003\022 \n\030first_" +
- "attempt_start_time\030\005 \001(\003\022\"\n\032current_atte" +
- "mpt_start_time\030\006 \001(\003\"\266\002\n\026SubmitWorkReque" +
- "stProto\022\033\n\023container_id_string\030\001 \001(\t\022\017\n\007" +
- "am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030\n\020token_" +
- "identifier\030\004 \001(\t\022\032\n\022credentials_binary\030\005" +
- " \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025application_id_str",
- "ing\030\007 \001(\t\022\032\n\022app_attempt_number\030\010 \001(\005\022)\n" +
- "\rfragment_spec\030\t \001(\0132\022.FragmentSpecProto" +
- "\0223\n\025fragment_runtime_info\030\n \001(\0132\024.Fragme" +
- "ntRuntimeInfo\"J\n\027SubmitWorkResponseProto" +
- "\022/\n\020submission_state\030\001 \001(\0162\025.SubmissionS" +
- "tateProto\"f\n\036SourceStateUpdatedRequestPr" +
- "oto\022\020\n\010dag_name\030\001 \001(\t\022\020\n\010src_name\030\002 \001(\t\022" +
+ "\353\002\n\021FragmentSpecProto\022\"\n\032fragment_identi" +
+ "fier_string\030\001 \001(\t\022\020\n\010dag_name\030\002 \001(\t\022\016\n\006d" +
+ "ag_id\030\013 \001(\005\022\023\n\013vertex_name\030\003 \001(\t\0224\n\024proc" +
+ "essor_descriptor\030\004 \001(\0132\026.EntityDescripto" +
+ "rProto\022!\n\013input_specs\030\005 \003(\0132\014.IOSpecProt" +
+ "o\022\"\n\014output_specs\030\006 \003(\0132\014.IOSpecProto\0221\n" +
+ "\023grouped_input_specs\030\007 \003(\0132\024.GroupInputS" +
+ "pecProto\022\032\n\022vertex_parallelism\030\010 \001(\005\022\027\n\017" +
+ "fragment_number\030\t \001(\005\022\026\n\016attempt_number\030",
+ "\n \001(\005\"\344\001\n\023FragmentRuntimeInfo\022#\n\033num_sel" +
+ "f_and_upstream_tasks\030\001 \001(\005\022-\n%num_self_a" +
+ "nd_upstream_completed_tasks\030\002 \001(\005\022\033\n\023wit" +
+ "hin_dag_priority\030\003 \001(\005\022\026\n\016dag_start_time" +
+ "\030\004 \001(\003\022 \n\030first_attempt_start_time\030\005 \001(\003" +
+ "\022\"\n\032current_attempt_start_time\030\006 \001(\003\"F\n\024" +
+ "QueryIdentifierProto\022\026\n\016app_identifier\030\001" +
+ " \001(\t\022\026\n\016dag_identifier\030\002 \001(\005\"\266\002\n\026SubmitW" +
+ "orkRequestProto\022\033\n\023container_id_string\030\001" +
+ " \001(\t\022\017\n\007am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030",
+ "\n\020token_identifier\030\004 \001(\t\022\032\n\022credentials_" +
+ "binary\030\005 \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025applicatio" +
+ "n_id_string\030\007 \001(\t\022\032\n\022app_attempt_number\030" +
+ "\010 \001(\005\022)\n\rfragment_spec\030\t \001(\0132\022.FragmentS" +
+ "pecProto\0223\n\025fragment_runtime_info\030\n \001(\0132" +
+ "\024.FragmentRuntimeInfo\"J\n\027SubmitWorkRespo" +
+ "nseProto\022/\n\020submission_state\030\001 \001(\0162\025.Sub" +
+ "missionStateProto\"\205\001\n\036SourceStateUpdated" +
+ "RequestProto\022/\n\020query_identifier\030\001 \001(\0132\025" +
+ ".QueryIdentifierProto\022\020\n\010src_name\030\002 \001(\t\022",
" \n\005state\030\003 \001(\0162\021.SourceStateProto\"!\n\037Sou" +
- "rceStateUpdatedResponseProto\"X\n\031QueryCom" +
- "pleteRequestProto\022\020\n\010query_id\030\001 \001(\t\022\020\n\010d",
- "ag_name\030\002 \001(\t\022\027\n\014delete_delay\030\003 \001(\003:\0010\"\034" +
- "\n\032QueryCompleteResponseProto\"g\n\035Terminat" +
- "eFragmentRequestProto\022\020\n\010query_id\030\001 \001(\t\022" +
- "\020\n\010dag_name\030\002 \001(\t\022\"\n\032fragment_identifier" +
- "_string\030\007 \001(\t\" \n\036TerminateFragmentRespon" +
- "seProto\"\026\n\024GetTokenRequestProto\"&\n\025GetTo" +
- "kenResponseProto\022\r\n\005token\030\001 \001(\014*2\n\020Sourc" +
- "eStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNIN" +
- "G\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEPTED\020" +
- "\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\316\002\n\022L",
- "lapDaemonProtocol\022?\n\nsubmitWork\022\027.Submit" +
- "WorkRequestProto\032\030.SubmitWorkResponsePro" +
- "to\022W\n\022sourceStateUpdated\022\037.SourceStateUp" +
- "datedRequestProto\032 .SourceStateUpdatedRe" +
- "sponseProto\022H\n\rqueryComplete\022\032.QueryComp" +
- "leteRequestProto\032\033.QueryCompleteResponse" +
- "Proto\022T\n\021terminateFragment\022\036.TerminateFr" +
- "agmentRequestProto\032\037.TerminateFragmentRe" +
- "sponseProto2]\n\026LlapManagementProtocol\022C\n" +
- "\022getDelegationToken\022\025.GetTokenRequestPro",
- "to\032\026.GetTokenResponseProtoBH\n&org.apache" +
- ".hadoop.hive.llap.daemon.rpcB\030LlapDaemon" +
- "ProtocolProtos\210\001\001\240\001\001"
+ "rceStateUpdatedResponseProto\"w\n\031QueryCom" +
+ "pleteRequestProto\022\020\n\010query_id\030\001 \001(\t\022/\n\020q" +
+ "uery_identifier\030\002 \001(\0132\025.QueryIdentifierP" +
+ "roto\022\027\n\014delete_delay\030\004 \001(\003:\0010\"\034\n\032QueryCo" +
+ "mpleteResponseProto\"t\n\035TerminateFragment" +
+ "RequestProto\022/\n\020query_identifier\030\001 \001(\0132\025" +
+ ".QueryIdentifierProto\022\"\n\032fragment_identi" +
+ "fier_string\030\002 \001(\t\" \n\036TerminateFragmentRe" +
+ "sponseProto\"\026\n\024GetTokenRequestProto\"&\n\025G",
+ "etTokenResponseProto\022\r\n\005token\030\001 \001(\014*2\n\020S" +
+ "ourceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RU" +
+ "NNING\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEP" +
+ "TED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\316" +
+ "\002\n\022LlapDaemonProtocol\022?\n\nsubmitWork\022\027.Su" +
+ "bmitWorkRequestProto\032\030.SubmitWorkRespons" +
+ "eProto\022W\n\022sourceStateUpdated\022\037.SourceSta" +
+ "teUpdatedRequestProto\032 .SourceStateUpdat" +
+ "edResponseProto\022H\n\rqueryComplete\022\032.Query" +
+ "CompleteRequestProto\032\033.QueryCompleteResp",
+ "onseProto\022T\n\021terminateFragment\022\036.Termina" +
+ "teFragmentRequestProto\032\037.TerminateFragme" +
+ "ntResponseProto2]\n\026LlapManagementProtoco" +
+ "l\022C\n\022getDelegationToken\022\025.GetTokenReques" +
+ "tProto\032\026.GetTokenResponseProtoBH\n&org.ap" +
+ "ache.hadoop.hive.llap.daemon.rpcB\030LlapDa" +
+ "emonProtocolProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -13826,69 +14467,75 @@ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
internal_static_FragmentSpecProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FragmentSpecProto_descriptor,
- new java.lang.String[] { "FragmentIdentifierString", "DagName", "VertexName", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "FragmentNumber", "AttemptNumber", });
+ new java.lang.String[] { "FragmentIdentifierString", "DagName", "DagId", "VertexName", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "FragmentNumber", "AttemptNumber", });
internal_static_FragmentRuntimeInfo_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_FragmentRuntimeInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FragmentRuntimeInfo_descriptor,
new java.lang.String[] { "NumSelfAndUpstreamTasks", "NumSelfAndUpstreamCompletedTasks", "WithinDagPriority", "DagStartTime", "FirstAttemptStartTime", "CurrentAttemptStartTime", });
- internal_static_SubmitWorkRequestProto_descriptor =
+ internal_static_QueryIdentifierProto_descriptor =
getDescriptor().getMessageTypes().get(6);
+ internal_static_QueryIdentifierProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_QueryIdentifierProto_descriptor,
+ new java.lang.String[] { "AppIdentifier", "DagIdentifier", });
+ internal_static_SubmitWorkRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_SubmitWorkRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SubmitWorkRequestProto_descriptor,
new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", "FragmentSpec", "FragmentRuntimeInfo", });
internal_static_SubmitWorkResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_SubmitWorkResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SubmitWorkResponseProto_descriptor,
new java.lang.String[] { "SubmissionState", });
internal_static_SourceStateUpdatedRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SourceStateUpdatedRequestProto_descriptor,
- new java.lang.String[] { "DagName", "SrcName", "State", });
+ new java.lang.String[] { "QueryIdentifier", "SrcName", "State", });
internal_static_SourceStateUpdatedResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SourceStateUpdatedResponseProto_descriptor,
new java.lang.String[] { });
internal_static_QueryCompleteRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_QueryCompleteRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_QueryCompleteRequestProto_descriptor,
- new java.lang.String[] { "QueryId", "DagName", "DeleteDelay", });
+ new java.lang.String[] { "QueryId", "QueryIdentifier", "DeleteDelay", });
internal_static_QueryCompleteResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_QueryCompleteResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_QueryCompleteResponseProto_descriptor,
new java.lang.String[] { });
internal_static_TerminateFragmentRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TerminateFragmentRequestProto_descriptor,
- new java.lang.String[] { "QueryId", "DagName", "FragmentIdentifierString", });
+ new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", });
internal_static_TerminateFragmentResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TerminateFragmentResponseProto_descriptor,
new java.lang.String[] { });
internal_static_GetTokenRequestProto_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_GetTokenRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetTokenRequestProto_descriptor,
new java.lang.String[] { });
internal_static_GetTokenResponseProto_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_GetTokenResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetTokenResponseProto_descriptor,
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index ab4df5dbd9eac092b8691bcdfdf4aacbfbf80ce8..824ff33bc7d233236ed75d6d7ad42b230e9205c2 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.hive.llap.cache;
+import com.google.common.annotations.VisibleForTesting;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -44,13 +44,23 @@
// We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
// That is guaranteed to fit any maximum allocation.
private static final int MAX_ARENA_SIZE = 1024*1024*1024;
- public BuddyAllocator(Configuration conf, MemoryManager memoryManager,
- LlapDaemonCacheMetrics metrics) {
- isDirect = HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_DIRECT);
- minAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
- maxAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC);
- int arenaCount = HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT);
- long maxSizeVal = HiveConf.getLongVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+
+
+ public BuddyAllocator(Configuration conf, MemoryManager mm, LlapDaemonCacheMetrics metrics) {
+ this(HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_DIRECT),
+ (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
+ (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC),
+ HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT),
+ HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE),
+ mm, metrics);
+ }
+
+ @VisibleForTesting
+ public BuddyAllocator(boolean isDirectVal, int minAllocVal, int maxAllocVal, int arenaCount,
+ long maxSizeVal, MemoryManager memoryManager, LlapDaemonCacheMetrics metrics) {
+ isDirect = isDirectVal;
+ minAllocation = minAllocVal;
+ maxAllocation = maxAllocVal;
int arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : (int)(maxSizeVal / arenaCount);
arenaSizeVal = Math.max(maxAllocation, Math.min(arenaSizeVal, MAX_ARENA_SIZE));
if (LlapIoImpl.LOG.isInfoEnabled()) {
@@ -60,7 +70,7 @@ public BuddyAllocator(Configuration conf, MemoryManager memoryManager,
}
if (minAllocation < 8) {
- throw new AssertionError("Min allocation must be at least 8: " + minAllocation);
+ throw new AssertionError("Min allocation must be at least 8 bytes: " + minAllocation);
}
if (maxSizeVal < arenaSizeVal || maxAllocation < minAllocation) {
throw new AssertionError("Inconsistent sizes of cache, arena and allocations: "
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index 992da8e78599174becf53063d310821369fca2a9..1cfe2bc4fa839d49e447408c1fd20cf85bc9a03f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hive.llap.cache;
+import com.google.common.annotations.VisibleForTesting;
import java.util.concurrent.atomic.AtomicLong;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -37,9 +37,15 @@
private final LlapDaemonCacheMetrics metrics;
private long maxSize;
- public LowLevelCacheMemoryManager(Configuration conf, LowLevelCachePolicy evictor,
- LlapDaemonCacheMetrics metrics) {
- this.maxSize = HiveConf.getLongVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+ public LowLevelCacheMemoryManager(
+ Configuration conf, LowLevelCachePolicy evictor, LlapDaemonCacheMetrics metrics) {
+ this(HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE), evictor, metrics);
+ }
+
+ @VisibleForTesting
+ public LowLevelCacheMemoryManager(
+ long maxSize, LowLevelCachePolicy evictor, LlapDaemonCacheMetrics metrics) {
+ this.maxSize = maxSize;
this.evictor = evictor;
this.usedMemory = new AtomicLong(0);
this.metrics = metrics;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index 84910d74269f61e010a0ce1061d2d3591d93e745..6f52b86dca067b55a934c58c0c66ccc984bd240c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.hive.llap.cache;
+import com.google.common.annotations.VisibleForTesting;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
-
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -67,8 +67,12 @@ private final double expirePriority(long time, long lastAccess, double previous)
private LlapOomDebugDump parentDebugDump;
public LowLevelLrfuCachePolicy(Configuration conf) {
- long maxSize = HiveConf.getLongVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
- int minBufferSize = HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
+ this((int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
+ HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE), conf);
+ }
+
+ @VisibleForTesting
+ public LowLevelLrfuCachePolicy(int minBufferSize, long maxSize, Configuration conf) {
lambda = HiveConf.getFloatVar(conf, HiveConf.ConfVars.LLAP_LRFU_LAMBDA);
int maxBuffers = (int)Math.ceil((maxSize * 1.0) / minBufferSize);
int maxHeapSize = -1;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index d6e1a6e68630f73ee5cf3acd6dc2acb5b01202a2..544af0952d76d8194aff1e204a5a67b9d8fee046 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -172,7 +172,8 @@ private void run(String[] args) throws Exception {
}
if (options.getCache() != -1) {
- conf.setLong(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, options.getCache());
+ conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
+ Long.toString(options.getCache()));
}
if (options.getXmx() != -1) {
@@ -271,14 +272,25 @@ private void run(String[] args) throws Exception {
lfs.copyFromLocalFile(new Path(logger.toString()), confPath);
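+ // Pick the JVM location to advertise in the generated configs; warn if JAVA_HOME and the running JRE disagree.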
+ String java_home = System.getenv("JAVA_HOME");
+ String jre_home = System.getProperty("java.home");
+ if (java_home == null) {
+ java_home = jre_home;
+ } else if (!java_home.equals(jre_home)) {
+ LOG.warn("Java versions might not match : JAVA_HOME=%s,process jre=%s",
+ java_home, jre_home);
+ }
+
// extract configs for processing by the python fragments in Slider
JSONObject configs = new JSONObject();
+ configs.put("java.home", java_home);
+
configs.put(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, HiveConf.getIntVar(conf,
ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB));
configs.put(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
- HiveConf.getLongVar(conf, HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE));
+ HiveConf.getSizeVar(conf, HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE));
configs.put(HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT.varname,
HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT));
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
index 3c9ad2427c2d9f5ca3c6dcea892bc29b7e8ac891..f1fc28525519d3085316a440a205d231c8839f0f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/HistoryLogger.java
@@ -27,6 +27,7 @@
private static final String HISTORY_START_TIME = "StartTime";
private static final String HISTORY_END_TIME = "EndTime";
private static final String HISTORY_DAG_NAME = "DagName";
+ private static final String HISTORY_DAG_ID = "DagId";
private static final String HISTORY_VERTEX_NAME = "VertexName";
private static final String HISTORY_TASK_ID = "TaskId";
private static final String HISTORY_ATTEMPT_ID = "TaskAttemptId";
@@ -41,29 +42,30 @@
public static void logFragmentStart(String applicationIdStr, String containerIdStr,
String hostname,
- String dagName, String vertexName, int taskId,
+ String dagName, int dagIdentifier, String vertexName, int taskId,
int attemptId) {
HISTORY_LOGGER.info(
- constructFragmentStartString(applicationIdStr, containerIdStr, hostname, dagName,
+ constructFragmentStartString(applicationIdStr, containerIdStr, hostname, dagName, dagIdentifier,
vertexName, taskId, attemptId));
}
public static void logFragmentEnd(String applicationIdStr, String containerIdStr, String hostname,
- String dagName, String vertexName, int taskId, int attemptId,
+ String dagName, int dagIdentifier, String vertexName, int taskId, int attemptId,
String threadName, long startTime, boolean failed) {
HISTORY_LOGGER.info(constructFragmentEndString(applicationIdStr, containerIdStr, hostname,
- dagName, vertexName, taskId, attemptId, threadName, startTime, failed));
+ dagName, dagIdentifier, vertexName, taskId, attemptId, threadName, startTime, failed));
}
private static String constructFragmentStartString(String applicationIdStr, String containerIdStr,
- String hostname, String dagName,
+ String hostname, String dagName, int dagIdentifier,
String vertexName, int taskId, int attemptId) {
HistoryLineBuilder lb = new HistoryLineBuilder(EVENT_TYPE_FRAGMENT_START);
lb.addHostName(hostname);
lb.addAppid(applicationIdStr);
lb.addContainerId(containerIdStr);
lb.addDagName(dagName);
+ lb.addDagId(dagIdentifier);
lb.addVertexName(vertexName);
lb.addTaskId(taskId);
lb.addTaskAttemptId(attemptId);
@@ -72,7 +74,7 @@ private static String constructFragmentStartString(String applicationIdStr, Stri
}
private static String constructFragmentEndString(String applicationIdStr, String containerIdStr,
- String hostname, String dagName,
+ String hostname, String dagName, int dagIdentifier,
String vertexName, int taskId, int attemptId,
String threadName, long startTime, boolean succeeded) {
HistoryLineBuilder lb = new HistoryLineBuilder(EVENT_TYPE_FRAGMENT_END);
@@ -80,6 +82,7 @@ private static String constructFragmentEndString(String applicationIdStr, String
lb.addAppid(applicationIdStr);
lb.addContainerId(containerIdStr);
lb.addDagName(dagName);
+ lb.addDagId(dagIdentifier);
lb.addVertexName(vertexName);
lb.addTaskId(taskId);
lb.addTaskAttemptId(attemptId);
@@ -113,6 +116,10 @@ HistoryLineBuilder addDagName(String dagName) {
return setKeyValue(HISTORY_DAG_NAME, dagName);
}
+ HistoryLineBuilder addDagId(int dagId) {
+ return setKeyValue(HISTORY_DAG_ID, String.valueOf(dagId));
+ }
+
HistoryLineBuilder addVertexName(String vertexName) {
return setKeyValue(HISTORY_VERTEX_NAME, vertexName);
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
index 7cb433bfd9c10ec678fbd022fb634aa9ee0643ea..e2caec27ea22548026de3fae42852fc2c62389e1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/KilledTaskHandler.java
@@ -14,6 +14,7 @@
package org.apache.hadoop.hive.llap.daemon;
+import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
import org.apache.hadoop.security.token.Token;
import org.apache.tez.common.security.JobTokenIdentifier;
import org.apache.tez.dag.records.TezTaskAttemptID;
@@ -24,6 +25,6 @@
// inferred from this.
// Passing in parameters until there's some dag information stored and tracked in the daemon.
void taskKilled(String amLocation, int port, String user,
- Token jobToken, String queryId, String dagName,
+ Token jobToken, QueryIdentifier queryIdentifier,
TezTaskAttemptID taskAttemptId);
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
index 4e62a68f763c3c43324b2034885d260b8b214c9c..7f9553d9fe70212763a5a1dc7bd723cd63c44720 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/QueryFailedHandler.java
@@ -14,7 +14,9 @@
package org.apache.hadoop.hive.llap.daemon;
+import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
+
public interface QueryFailedHandler {
- public void queryFailed(String queryId, String dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier);
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
index f6711d80acf847f1babd6ef522b332a21a7ada61..d1ec71559a08ba4f3da5e91341a115ff86fdd400 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
@@ -172,9 +172,9 @@ public void serviceStop() {
}
public void registerTask(String amLocation, int port, String user,
- Token jobToken, String queryId, String dagName) {
+ Token jobToken, QueryIdentifier queryIdentifier) {
if (LOG.isTraceEnabled()) {
- LOG.trace("Registering for heartbeat: " + amLocation + ":" + port + " for dagName=" + dagName);
+ LOG.trace("Registering for heartbeat: " + amLocation + ":" + port + " for queryIdentifier=" + queryIdentifier);
}
AMNodeInfo amNodeInfo;
synchronized (knownAppMasters) {
@@ -182,7 +182,7 @@ public void registerTask(String amLocation, int port, String user,
amNodeInfo = knownAppMasters.get(amNodeId);
if (amNodeInfo == null) {
amNodeInfo =
- new AMNodeInfo(amNodeId, user, jobToken, dagName, retryPolicy, retryTimeout, socketFactory,
+ new AMNodeInfo(amNodeId, user, jobToken, queryIdentifier, retryPolicy, retryTimeout, socketFactory,
conf);
knownAppMasters.put(amNodeId, amNodeInfo);
// Add to the queue only the first time this is registered, and on
@@ -190,7 +190,7 @@ public void registerTask(String amLocation, int port, String user,
amNodeInfo.setNextHeartbeatTime(System.currentTimeMillis() + heartbeatInterval);
pendingHeartbeatQueeu.add(amNodeInfo);
}
- amNodeInfo.setCurrentDagName(dagName);
+ amNodeInfo.setCurrentQueryIdentifier(queryIdentifier);
amNodeInfo.incrementAndGetTaskCount();
}
}
@@ -214,12 +214,12 @@ public void unregisterTask(String amLocation, int port) {
}
public void taskKilled(String amLocation, int port, String user, Token jobToken,
- final String queryId, final String dagName, final TezTaskAttemptID taskAttemptId) {
+ final QueryIdentifier queryIdentifier, final TezTaskAttemptID taskAttemptId) {
// Not re-using the connection for the AM heartbeat - which may or may not be open by this point.
// knownAppMasters is used for sending heartbeats for queued tasks. Killed messages use a new connection.
LlapNodeId amNodeId = LlapNodeId.getInstance(amLocation, port);
AMNodeInfo amNodeInfo =
- new AMNodeInfo(amNodeId, user, jobToken, dagName, retryPolicy, retryTimeout, socketFactory,
+ new AMNodeInfo(amNodeId, user, jobToken, queryIdentifier, retryPolicy, retryTimeout, socketFactory,
conf);
// Even if the service hasn't started up. It's OK to make this invocation since this will
@@ -251,8 +251,8 @@ protected Void callInternal() {
synchronized (knownAppMasters) {
if (LOG.isDebugEnabled()) {
LOG.debug(
- "Removing am {} with last associated dag{} from heartbeat with taskCount={}, amFailed={}",
- amNodeInfo.amNodeId, amNodeInfo.getCurrentDagName(), amNodeInfo.getTaskCount(),
+ "Removing am {} with last associated dag {} from heartbeat with taskCount={}, amFailed={}",
+ amNodeInfo.amNodeId, amNodeInfo.getCurrentQueryIdentifier(), amNodeInfo.getTaskCount(),
amNodeInfo.hasAmFailed(), amNodeInfo);
}
knownAppMasters.remove(amNodeInfo.amNodeId);
@@ -272,11 +272,11 @@ public void onSuccess(Void result) {
@Override
public void onFailure(Throwable t) {
- String currentDagName = amNodeInfo.getCurrentDagName();
+ QueryIdentifier currentQueryIdentifier = amNodeInfo.getCurrentQueryIdentifier();
amNodeInfo.setAmFailed(true);
LOG.warn("Heartbeat failed to AM {}. Killing all other tasks for the query={}",
- amNodeInfo.amNodeId, currentDagName, t);
- queryFailedHandler.queryFailed(null, currentDagName);
+ amNodeInfo.amNodeId, currentQueryIdentifier, t);
+ queryFailedHandler.queryFailed(currentQueryIdentifier);
}
});
}
@@ -339,11 +339,11 @@ protected Void callInternal() {
amNodeInfo.getUmbilical().nodeHeartbeat(new Text(nodeId.getHostname()),
nodeId.getPort());
} catch (IOException e) {
- String currentDagName = amNodeInfo.getCurrentDagName();
+ QueryIdentifier currentQueryIdentifier = amNodeInfo.getCurrentQueryIdentifier();
amNodeInfo.setAmFailed(true);
LOG.warn("Failed to communicated with AM at {}. Killing remaining fragments for query {}",
- amNodeInfo.amNodeId, currentDagName, e);
- queryFailedHandler.queryFailed(null, currentDagName);
+ amNodeInfo.amNodeId, currentQueryIdentifier, e);
+ queryFailedHandler.queryFailed(currentQueryIdentifier);
} catch (InterruptedException e) {
if (!isShutdown.get()) {
LOG.warn("Interrupted while trying to send heartbeat to AM {}", amNodeInfo.amNodeId, e);
@@ -370,21 +370,21 @@ protected Void callInternal() {
private final long timeout;
private final SocketFactory socketFactory;
private final AtomicBoolean amFailed = new AtomicBoolean(false);
- private String currentDagName;
+ private QueryIdentifier currentQueryIdentifier;
private LlapTaskUmbilicalProtocol umbilical;
private long nextHeartbeatTime;
public AMNodeInfo(LlapNodeId amNodeId, String user,
Token jobToken,
- String currentDagName,
+ QueryIdentifier currentQueryIdentifier,
RetryPolicy retryPolicy,
long timeout,
SocketFactory socketFactory,
Configuration conf) {
this.user = user;
this.jobToken = jobToken;
- this.currentDagName = currentDagName;
+ this.currentQueryIdentifier = currentQueryIdentifier;
this.retryPolicy = retryPolicy;
this.timeout = timeout;
this.socketFactory = socketFactory;
@@ -439,12 +439,12 @@ int getTaskCount() {
return taskCount.get();
}
- public synchronized String getCurrentDagName() {
- return currentDagName;
+ public synchronized QueryIdentifier getCurrentQueryIdentifier() {
+ return currentQueryIdentifier;
}
- public synchronized void setCurrentDagName(String currentDagName) {
- this.currentDagName = currentDagName;
+ public synchronized void setCurrentQueryIdentifier(QueryIdentifier queryIdentifier) {
+ this.currentQueryIdentifier = queryIdentifier;
}
synchronized void setNextHeartbeatTime(long nextTime) {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 0d85671381e00510959cb5e4ba5fd9f2220fbdf6..535fe7679181ff41919ed947da5e2e1d1d29b807 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -60,6 +60,8 @@
import org.apache.tez.dag.api.TezConfiguration;
import org.apache.tez.dag.api.TezConstants;
import org.apache.tez.dag.records.TezTaskAttemptID;
+import org.apache.tez.hadoop.shim.HadoopShim;
+import org.apache.tez.hadoop.shim.HadoopShimsLoader;
import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -85,12 +87,13 @@
private final Configuration conf;
private final TaskRunnerCallable.ConfParams confParams;
private final KilledTaskHandler killedTaskHandler = new KilledTaskHandlerImpl();
+ private final HadoopShim tezHadoopShim;
public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSize,
boolean enablePreemption, String[] localDirsBase, AtomicReference localShufflePort,
AtomicReference localAddress,
long totalMemoryAvailableBytes, LlapDaemonExecutorMetrics metrics,
- AMReporter amReporter) {
+ AMReporter amReporter, ClassLoader classLoader) {
super("ContainerRunnerImpl");
this.conf = conf;
Preconditions.checkState(numExecutors > 0,
@@ -103,8 +106,8 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi
addIfService(queryTracker);
String waitQueueSchedulerClassName = HiveConf.getVar(
conf, ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME);
- this.executorService = new TaskExecutorService(numExecutors, waitQueueSize, waitQueueSchedulerClassName,
- enablePreemption);
+ this.executorService = new TaskExecutorService(numExecutors, waitQueueSize,
+ waitQueueSchedulerClassName, enablePreemption, classLoader);
addIfService(executorService);
@@ -122,6 +125,7 @@ public ContainerRunnerImpl(Configuration conf, int numExecutors, int waitQueueSi
conf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT,
TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT)
);
+ tezHadoopShim = new HadoopShimsLoader(conf).getHadoopShim();
LOG.info("ContainerRunnerImpl config: " +
"memoryPerExecutorDerviced=" + memoryPerExecutor
@@ -149,7 +153,7 @@ protected void serviceStop() throws Exception {
@Override
public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
HistoryLogger.logFragmentStart(request.getApplicationIdString(), request.getContainerIdString(),
- localAddress.get().getHostName(), request.getFragmentSpec().getDagName(),
+ localAddress.get().getHostName(), request.getFragmentSpec().getDagName(), request.getFragmentSpec().getDagId(),
request.getFragmentSpec().getVertexName(), request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber());
if (LOG.isInfoEnabled()) {
@@ -172,8 +176,10 @@ public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws
fragmentSpec.getFragmentIdentifierString());
int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
+ QueryIdentifier queryIdentifier = new QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
+
QueryFragmentInfo fragmentInfo = queryTracker
- .registerFragment(null, request.getApplicationIdString(), fragmentSpec.getDagName(),
+ .registerFragment(queryIdentifier, request.getApplicationIdString(), fragmentSpec.getDagName(),
dagIdentifier,
fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
fragmentSpec.getAttemptNumber(), request.getUser(), request.getFragmentSpec());
@@ -205,7 +211,7 @@ public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws
TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, new Configuration(getConfig()),
new LlapExecutionContext(localAddress.get().getHostName(), queryTracker), env,
credentials, memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler,
- this);
+ this, tezHadoopShim);
submissionState = executorService.schedule(callable);
if (LOG.isInfoEnabled()) {
@@ -239,28 +245,37 @@ public LlapExecutionContext(String hostname, QueryTracker queryTracker) {
@Override
public void initializeHook(TezProcessor source) {
- queryTracker.registerDagQueryId(source.getContext().getDAGName(),
+ queryTracker.registerDagQueryId(
+ new QueryIdentifier(source.getContext().getApplicationId().toString(),
+ source.getContext().getDagIdentifier()),
HiveConf.getVar(source.getConf(), HiveConf.ConfVars.HIVEQUERYID));
}
}
@Override
- public SourceStateUpdatedResponseProto sourceStateUpdated(SourceStateUpdatedRequestProto request) {
+ public SourceStateUpdatedResponseProto sourceStateUpdated(
+ SourceStateUpdatedRequestProto request) {
LOG.info("Processing state update: " + stringifySourceStateUpdateRequest(request));
- queryTracker.registerSourceStateChange(request.getDagName(), request.getSrcName(),
+ queryTracker.registerSourceStateChange(
+ new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier()), request.getSrcName(),
request.getState());
return SourceStateUpdatedResponseProto.getDefaultInstance();
}
@Override
public QueryCompleteResponseProto queryComplete(QueryCompleteRequestProto request) {
- LOG.info("Processing queryComplete notification for {}", request.getDagName());
+ QueryIdentifier queryIdentifier =
+ new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier());
+ LOG.info("Processing queryComplete notification for {}", queryIdentifier);
List knownFragments =
- queryTracker.queryComplete(null, request.getDagName(), request.getDeleteDelay());
- LOG.info("DBG: Pending fragment count for completed query {} = {}", request.getDagName(),
+ queryTracker
+ .queryComplete(queryIdentifier, request.getDeleteDelay());
+ LOG.info("DBG: Pending fragment count for completed query {} = {}", queryIdentifier,
knownFragments.size());
for (QueryFragmentInfo fragmentInfo : knownFragments) {
- LOG.info("DBG: Issuing killFragment for completed query {} {}", request.getDagName(),
+ LOG.info("DBG: Issuing killFragment for completed query {} {}", queryIdentifier,
fragmentInfo.getFragmentIdentifierString());
executorService.killFragment(fragmentInfo.getFragmentIdentifierString());
}
@@ -276,7 +291,9 @@ public TerminateFragmentResponseProto terminateFragment(TerminateFragmentRequest
private String stringifySourceStateUpdateRequest(SourceStateUpdatedRequestProto request) {
StringBuilder sb = new StringBuilder();
- sb.append("dagName=").append(request.getDagName())
+ QueryIdentifier queryIdentifier = new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
+ request.getQueryIdentifier().getDagIdentifier());
+ sb.append("queryIdentifier=").append(queryIdentifier)
.append(", ").append("sourceName=").append(request.getSrcName())
.append(", ").append("state=").append(request.getState());
return sb.toString();
@@ -342,14 +359,14 @@ public void fragmentComplete(QueryFragmentInfo fragmentInfo) {
}
@Override
- public void queryFailed(String queryId, String dagName) {
- LOG.info("Processing query failed notification for {}", dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier) {
+ LOG.info("Processing query failed notification for {}", queryIdentifier);
List knownFragments =
- queryTracker.queryComplete(queryId, dagName, -1);
- LOG.info("DBG: Pending fragment count for failed query {} = {}", dagName,
+ queryTracker.queryComplete(queryIdentifier, -1);
+ LOG.info("DBG: Pending fragment count for failed query {} = {}", queryIdentifier,
knownFragments.size());
for (QueryFragmentInfo fragmentInfo : knownFragments) {
- LOG.info("DBG: Issuing killFragment for failed query {} {}", dagName,
+ LOG.info("DBG: Issuing killFragment for failed query {} {}", queryIdentifier,
fragmentInfo.getFragmentIdentifierString());
executorService.killFragment(fragmentInfo.getFragmentIdentifierString());
}
@@ -359,9 +376,9 @@ public void queryFailed(String queryId, String dagName) {
@Override
public void taskKilled(String amLocation, int port, String user,
- Token jobToken, String queryId, String dagName,
+ Token jobToken, QueryIdentifier queryIdentifier,
TezTaskAttemptID taskAttemptId) {
- amReporter.taskKilled(amLocation, port, user, jobToken, queryId, dagName, taskAttemptId);
+ amReporter.taskKilled(amLocation, port, user, jobToken, queryIdentifier, taskAttemptId);
}
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/FunctionLocalizer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/FunctionLocalizer.java
new file mode 100644
index 0000000000000000000000000000000000000000..bc0ad026c64a6b59d55dde43f05f9ed90db1d4e8
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/FunctionLocalizer.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLClassLoader;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.ResourceUri;
+import org.apache.hadoop.hive.ql.exec.FunctionTask;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo.FunctionResource;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
+import org.apache.hadoop.hive.ql.util.ResourceDownloader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class localizes and manages jars for the functions allowed inside LLAP.
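+ *
+ * A single worker thread drains a queue of localization requests: resources are downloaded into
+ * a function-scoped directory under the daemon's local dir, reference-counted per source URI,
+ * and jars are added to a shared UDF classloader when a refresh request is processed.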
+ */
+public class FunctionLocalizer {
+ private static final String DIR_NAME = "fnresources";
+ private static final Logger LOG = LoggerFactory.getLogger(FunctionLocalizer.class);
+ private ResourceDownloader resourceDownloader;
+ private final LinkedBlockingQueue<LocalizerWork> workQueue = new LinkedBlockingQueue<>();
+ private volatile boolean isClosed = false;
+ private final List<String> recentlyLocalizedJars = new LinkedList<>();
+ private final Thread workThread;
+ private final File localDir;
+ private final Configuration conf;
+ private final URLClassLoader executorClassloader;
+
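+ // Localized resources per fully-qualified function name, and reference-counted local copies keyed by source URI.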
+ private final ConcurrentHashMap<String, FnResources> resourcesByFn = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<URI, RefCountedResource> localFiles = new ConcurrentHashMap<>();
+
+ public FunctionLocalizer(Configuration conf, String localDir) {
+ this.conf = conf;
+ this.localDir = new File(localDir, DIR_NAME);
+ this.executorClassloader = (URLClassLoader)Utilities.createUDFClassLoader(
+ (URLClassLoader)Thread.currentThread().getContextClassLoader(), new String[]{});
+ this.workThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ runWorkThread();
+ }
+ });
+ }
+
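+ /**
+ * Deletes any leftover resource directory from a previous run and starts the worker thread.
+ */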
+ void init() throws IOException {
+ if (localDir.exists()) {
+ // TODO: We don't want some random jars of unknown provenance sitting around. Or do we care?
+ // Ideally, we should try to reuse jars and verify using some checksum.
+ FileUtils.deleteDirectory(localDir);
+ }
+ this.resourceDownloader = new ResourceDownloader(conf, localDir.getAbsolutePath());
+ workThread.start();
+ }
+
+ public ClassLoader getClassLoader() {
+ return executorClassloader;
+ }
+
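+ /**
+ * Queues localization work for every permanent function in the metastore that has resources,
+ * followed by a single classloader refresh once all of them have been processed.
+ */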
+ public void startLocalizeAllFunctions() throws HiveException {
+ List<Function> fns = Hive.get(false).getAllFunctions();
+ for (Function fn : fns) {
+ String fqfn = fn.getDbName() + "." + fn.getFunctionName();
+ List<ResourceUri> resources = fn.getResourceUris();
+ if (resources == null || resources.isEmpty()) continue; // Nothing to localize.
+ FnResources result = new FnResources();
+ resourcesByFn.put(fqfn, result);
+ workQueue.add(new LocalizeFn(fqfn, resources, result, false));
+ }
+ workQueue.add(new RefreshClassloader());
+ }
+
+ public void close() {
+ isClosed = true;
+ workThread.interrupt();
+ try {
+ workThread.join(1000); // Give it some time, then don't delay shutdown too much.
+ } catch (InterruptedException e) {
+ LOG.info("Interrupted during close");
+ }
+ }
+
+ private void runWorkThread() {
+ while (true) {
+ if (isClosed) {
+ deleteAllLocalResources();
+ return;
+ }
+ LocalizerWork lw = null;
+ try {
+ lw = workQueue.take();
+ } catch (InterruptedException ex) {
+ LOG.debug("Localizer thread interrupted");
+ isClosed = true;
+ }
+ if (isClosed) {
+ deleteAllLocalResources();
+ return;
+ }
+ try {
+ lw.run(this);
+ } catch (InterruptedException ex) {
+ LOG.debug("Localizer thread interrupted");
+ isClosed = true;
+ } catch (Exception ex) {
+ LOG.error("Failed to run " + lw, ex);
+ }
+ }
+ }
+
+ private interface LocalizerWork {
+ void run(FunctionLocalizer parent)
+ throws URISyntaxException, IOException, InterruptedException;
+ }
+
+ private static class LocalizeFn implements LocalizerWork {
+ private final List<ResourceUri> resources;
+ private final FnResources result;
+ private final String fqfn;
+ private final boolean doRefreshClassloader;
+ public LocalizeFn(String fqfn, List<ResourceUri> resources, FnResources result,
+ boolean doRefreshClassloader) {
+ this.resources = resources;
+ this.result = result;
+ this.fqfn = fqfn;
+ this.doRefreshClassloader = doRefreshClassloader;
+ }
+
+ public void run(FunctionLocalizer parent) throws URISyntaxException, IOException {
+ parent.localizeFunctionResources(fqfn, resources, result, doRefreshClassloader);
+ }
+
+ public String toString() {
+ return "localize " + resources.size() + " resources for " + fqfn;
+ }
+ }
+
+ private static class RefreshClassloader implements LocalizerWork {
+ public void run(FunctionLocalizer parent) throws URISyntaxException, IOException {
+ parent.refreshClassloader();
+ }
+
+ public String toString() {
+ return "load the recently localized jars";
+ }
+ }
+
+ private void deleteAllLocalResources() {
+ try {
+ executorClassloader.close();
+ } catch (Exception ex) {
+ LOG.info("Failed to close the classloader", ex.getMessage());
+ }
+ resourcesByFn.clear();
+ for (RefCountedResource rcr : localFiles.values()) {
+ for (FunctionResource fr : rcr.resources) {
+ // We ignore refcounts (and errors) for now.
+ File file = new File(fr.getResourceURI());
+ try {
+ if (!file.delete()) {
+ LOG.info("Failed to delete " + file);
+ }
+ } catch (Exception ex) {
+ LOG.info("Failed to delete " + file + ": " + ex.getMessage());
+ }
+ }
+ }
+ }
+
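+ /**
+ * Adds all recently localized jars to the shared executor classloader in a single pass.
+ */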
+ public void refreshClassloader() throws IOException {
+ if (recentlyLocalizedJars.isEmpty()) return;
+ String[] jars = recentlyLocalizedJars.toArray(new String[0]);
+ recentlyLocalizedJars.clear();
+ ClassLoader updatedCl = null;
+ try {
+ updatedCl = Utilities.addToClassPath(executorClassloader, jars);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Added " + jars.length + " jars to classpath");
+ }
+ } catch (Throwable t) {
+ // TODO: we could fall back to trying one by one and only ignore the failed ones.
+ String jarringError = "Unable to register jars: ";
+ for (String jar : jars) {
+ jarringError += (jar + ", ");
+ }
+ throw new IOException(jarringError, t);
+ }
+ if (updatedCl != executorClassloader) {
+ throw new AssertionError("Classloader was replaced despite using UDFClassLoader: new "
+ + updatedCl + ", old " + executorClassloader);
+ }
+ }
+
+ private void localizeFunctionResources(String fqfn, List<ResourceUri> resources,
+ FnResources result, boolean doRefreshClassloader) throws URISyntaxException, IOException {
+ // We will download into fn-scoped subdirectories to avoid name collisions (we assume there
+ // are no collisions within the same fn). That doesn't mean we download for every fn.
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Localizing " + resources.size() + " resources for " + fqfn);
+ }
+ for (ResourceUri resource : resources) {
+ URI srcUri = ResourceDownloader.createURI(resource.getUri());
+ ResourceType rt = FunctionTask.getResourceType(resource.getResourceType());
+ localizeOneResource(fqfn, srcUri, rt, result);
+ }
+ if (doRefreshClassloader) {
+ refreshClassloader();
+ }
+ }
+
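+ // Downloads a single resource, unless an already-localized copy exists, in which case only its refcount is bumped.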
+ private void localizeOneResource(String fqfn, URI srcUri, ResourceType rt, FnResources result)
+ throws URISyntaxException, IOException {
+ RefCountedResource rcr = localFiles.get(srcUri);
+ if (rcr != null && rcr.refCount > 0) {
+ logFilesUsed("Reusing", fqfn, srcUri, rcr);
+ ++rcr.refCount;
+ result.addResources(rcr);
+ return;
+ }
+ rcr = new RefCountedResource();
+ List<URI> localUris = resourceDownloader.downloadExternal(srcUri, fqfn, false);
+ if (localUris == null || localUris.isEmpty()) {
+ LOG.error("Cannot download " + srcUri + " for " + fqfn);
+ return;
+ }
+ rcr.resources = new ArrayList<>();
+ for (URI uri : localUris) {
+ // Reuse the same type for all. Only Ivy can return more than one, probably all jars.
+ String path = uri.getPath();
+ rcr.resources.add(new FunctionResource(rt, path));
+ if (rt == ResourceType.JAR) {
+ recentlyLocalizedJars.add(path);
+ }
+ }
+ ++rcr.refCount;
+ logFilesUsed("Using", fqfn, srcUri, rcr);
+ localFiles.put(srcUri, rcr);
+ result.addResources(rcr);
+ }
+
+ private void logFilesUsed(String what, String fqfn, URI srcUri, RefCountedResource rcr) {
+ if (!LOG.isInfoEnabled()) return;
+ String desc = (rcr.resources.size() == 1
+ ? rcr.resources.get(0).toString() : (rcr.resources.size() + " files"));
+ LOG.info(what + " files [" + desc + "] for [" + srcUri + "] resource for " + fqfn);
+ }
+
+ private static class RefCountedResource {
+ List<FunctionResource> resources;
+ int refCount = 0;
+ }
+
+ private static class FnResources {
+ final List<FunctionResource> localResources = new ArrayList<>();
+ final List<RefCountedResource> originals = new ArrayList<>();
+ public void addResources(RefCountedResource rcr) {
+ localResources.addAll(rcr.resources);
+ originals.add(rcr);
+ }
+ }
+}
\ No newline at end of file
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index ddedfbfb45841d01ce2438ddfc06ae35192c5a5a..7d7fa001cfdc7636f7a5acc1b35558f06480096c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -19,6 +19,7 @@
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryType;
import java.net.InetSocketAddress;
+import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
@@ -47,6 +48,7 @@
import org.apache.hadoop.hive.llap.metrics.MetricsUtils;
import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
@@ -74,6 +76,7 @@
private final JvmPauseMonitor pauseMonitor;
private final ObjectName llapDaemonInfoBean;
private final LlapDaemonExecutorMetrics metrics;
+ private final FunctionLocalizer fnLocalizer;
// Parameters used for JMX
private final boolean llapIoEnabled;
@@ -161,22 +164,23 @@ public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor
LOG.info("Started LlapMetricsSystem with displayName: " + displayName +
" sessionId: " + sessionId);
-
this.amReporter = new AMReporter(srvAddress, new QueryFailedHandlerProxy(), daemonConf);
this.server = new LlapDaemonProtocolServerImpl(
numHandlers, this, srvAddress, mngAddress, srvPort, mngPort);
- this.containerRunner = new ContainerRunnerImpl(daemonConf,
- numExecutors,
- waitQueueSize,
- enablePreemption,
- localDirs,
- this.shufflePort,
- srvAddress,
- executorMemoryBytes,
- metrics,
- amReporter);
+ ClassLoader executorClassLoader = null;
+ if (HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_DAEMON_ALLOW_PERMANENT_FNS)) {
+ this.fnLocalizer = new FunctionLocalizer(daemonConf, localDirs[0]);
+ executorClassLoader = fnLocalizer.getClassLoader();
+ } else {
+ this.fnLocalizer = null;
+ executorClassLoader = Thread.currentThread().getContextClassLoader();
+ }
+
+ this.containerRunner = new ContainerRunnerImpl(daemonConf, numExecutors, waitQueueSize,
+ enablePreemption, localDirs, this.shufflePort, srvAddress, executorMemoryBytes, metrics,
+ amReporter, executorClassLoader);
addIfService(containerRunner);
this.registry = new LlapRegistryService(true);
@@ -235,7 +239,12 @@ private void printAsciiArt() {
public void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
LlapProxy.setDaemon(true);
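+ // Localize permanent function jars before the daemon starts accepting fragments.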
+ if (fnLocalizer != null) {
+ fnLocalizer.init();
+ fnLocalizer.startLocalizeAllFunctions();
+ }
LlapProxy.initializeLlapIo(conf);
+
}
@Override
@@ -274,6 +283,10 @@ public void shutdown() {
}
LlapProxy.close();
+
+ if (fnLocalizer != null) {
+ fnLocalizer.close();
+ }
}
public static void main(String[] args) throws Exception {
@@ -298,7 +311,7 @@ public static void main(String[] args) throws Exception {
long executorMemoryBytes = HiveConf.getIntVar(
daemonConf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB) * 1024l * 1024l;
- long ioMemoryBytes = HiveConf.getLongVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+ long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
boolean llapIoEnabled = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED);
llapDaemon = new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, llapIoEnabled,
@@ -431,12 +444,11 @@ public void uncaughtException(Thread t, Throwable e) {
}
}
-
private class QueryFailedHandlerProxy implements QueryFailedHandler {
@Override
- public void queryFailed(String queryId, String dagName) {
- containerRunner.queryFailed(queryId, dagName);
+ public void queryFailed(QueryIdentifier queryIdentifier) {
+ containerRunner.queryFailed(queryIdentifier);
}
}
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index fc662545da134b96aa13655693db719a711c7117..bb9f34167b4b817ce401c21b8f00f130d767ac34 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -350,13 +350,15 @@ private TaskStatusUpdateEvent getStatusUpdateEvent(boolean sendCounters) {
float progress = 0;
if (task.hasInitialized()) {
progress = task.getProgress();
+ // TODO HIVE-12449. Make use of progress notifications once Hive starts sending them out.
+ // progressNotified = task.getAndClearProgressNotification();
if (sendCounters) {
// send these potentially large objects at longer intervals to avoid overloading the AM
counters = task.getCounters();
stats = task.getTaskStatistics();
}
}
- return new TaskStatusUpdateEvent(counters, progress, stats);
+ return new TaskStatusUpdateEvent(counters, progress, stats, true);
}
/**
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java
new file mode 100644
index 0000000000000000000000000000000000000000..96e77e4863cab7215952ff205841784aef8a7fe6
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryIdentifier.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+/**
+ * A unique identifier for a query, composed of the application identifier and the DAG identifier.
+ */
+public final class QueryIdentifier {
+
+ private final String appIdentifier;
+ private final int dagIdentifier;
+
+
+ public QueryIdentifier(String appIdentifier, int dagIdentifier) {
+ this.appIdentifier = appIdentifier;
+ this.dagIdentifier = dagIdentifier;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || !getClass().isAssignableFrom(o.getClass())) {
+ return false;
+ }
+
+ QueryIdentifier that = (QueryIdentifier) o;
+
+ if (dagIdentifier != that.dagIdentifier) {
+ return false;
+ }
+ return appIdentifier.equals(that.appIdentifier);
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result = appIdentifier.hashCode();
+ result = 31 * result + dagIdentifier;
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "QueryIdentifier{" +
+ "appIdentifier='" + appIdentifier + '\'' +
+ ", dagIdentifier=" + dagIdentifier +
+ '}';
+ }
+}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
index 27f2d4c029d430fe37d1617f48342551acd25e4a..64c2b58679dd6a24f51ac4a4237fd60a66849d56 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
@@ -20,11 +20,13 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReentrantLock;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
@@ -37,7 +39,7 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;
public class QueryInfo {
- private final String queryId;
+ private final QueryIdentifier queryIdentifier;
private final String appIdString;
private final String dagName;
private final int dagIdentifier;
@@ -54,10 +56,10 @@
private final FinishableStateTracker finishableStateTracker = new FinishableStateTracker();
- public QueryInfo(String queryId, String appIdString, String dagName, int dagIdentifier,
+ public QueryInfo(QueryIdentifier queryIdentifier, String appIdString, String dagName, int dagIdentifier,
String user, ConcurrentMap sourceStateMap,
String[] localDirsBase, FileSystem localFs) {
- this.queryId = queryId;
+ this.queryIdentifier = queryIdentifier;
this.appIdString = appIdString;
this.dagName = dagName;
this.dagIdentifier = dagIdentifier;
@@ -67,18 +69,14 @@ public QueryInfo(String queryId, String appIdString, String dagName, int dagIden
this.localFs = localFs;
}
- public String getQueryId() {
- return queryId;
+ public QueryIdentifier getQueryIdentifier() {
+ return queryIdentifier;
}
public String getAppIdString() {
return appIdString;
}
- public String getDagName() {
- return dagName;
- }
-
public int getDagIdentifier() {
return dagIdentifier;
}
@@ -170,37 +168,60 @@ void sourceStateUpdated(String sourceName) {
private final Map trackedEntities = new HashMap<>();
private final Multimap sourceToEntity = HashMultimap.create();
- synchronized boolean registerForUpdates(FinishableStateUpdateHandler handler,
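+ // Explicit lock (instead of synchronized methods) so that sourceStateUpdated can snapshot the
+ // interested entities under the lock and invoke their handlers outside of it.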
+ private final ReentrantLock lock = new ReentrantLock();
+
+ boolean registerForUpdates(FinishableStateUpdateHandler handler,
List sources, QueryFragmentInfo fragmentInfo,
boolean lastFinishableState) {
- EntityInfo entityInfo =
- new EntityInfo(handler, sources, fragmentInfo, lastFinishableState);
- if (trackedEntities.put(handler, entityInfo) != null) {
- throw new IllegalStateException(
- "Only a single registration allowed per entity. Duplicate for " + handler.toString());
- }
- for (String source : sources) {
- sourceToEntity.put(source, entityInfo);
- }
+ lock.lock();
+ try {
+ EntityInfo entityInfo =
+ new EntityInfo(handler, sources, fragmentInfo, lastFinishableState);
+ if (trackedEntities.put(handler, entityInfo) != null) {
+ throw new IllegalStateException(
+ "Only a single registration allowed per entity. Duplicate for " + handler.toString());
+ }
+ for (String source : sources) {
+ sourceToEntity.put(source, entityInfo);
+ }
- if (lastFinishableState != fragmentInfo.canFinish()) {
- entityInfo.setLastFinishableState(fragmentInfo.canFinish());
- return false;
- } else {
- return true;
+ if (lastFinishableState != fragmentInfo.canFinish()) {
+ entityInfo.setLastFinishableState(fragmentInfo.canFinish());
+ return false;
+ } else {
+ return true;
+ }
+ } finally {
+ lock.unlock();
}
}
- synchronized void unregisterForUpdates(FinishableStateUpdateHandler handler) {
- EntityInfo info = trackedEntities.remove(handler);
- Preconditions.checkState(info != null, "Cannot invoke unregister on an entity which has not been registered");
- for (String source : info.getSources()) {
- sourceToEntity.remove(source, info);
+ void unregisterForUpdates(FinishableStateUpdateHandler handler) {
+ lock.lock();
+ try {
+ EntityInfo info = trackedEntities.remove(handler);
+ Preconditions.checkState(info != null,
+ "Cannot invoke unregister on an entity which has not been registered");
+ for (String source : info.getSources()) {
+ sourceToEntity.remove(source, info);
+ }
+ } finally {
+ lock.unlock();
}
}
- synchronized void sourceStateUpdated(String sourceName) {
- Collection interestedEntityInfos = sourceToEntity.get(sourceName);
+ void sourceStateUpdated(String sourceName) {
+ List<EntityInfo> interestedEntityInfos = null;
+ lock.lock();
+ try {
+ Collection<EntityInfo> entities = sourceToEntity.get(sourceName);
+ if (entities != null) {
+ // Create a copy since the underlying list can be changed elsewhere.
+ interestedEntityInfos = new LinkedList<>(entities);
+ }
+ } finally {
+ lock.unlock();
+ }
if (interestedEntityInfos != null) {
for (EntityInfo entityInfo : interestedEntityInfos) {
boolean newFinishState = entityInfo.getFragmentInfo().canFinish();
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
index 6deaefcf52e80448f7591d795f6ca70e5645306b..0676eddd5fb396d8e0b358e2923c2c764d779909 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
@@ -56,8 +56,7 @@
private final ScheduledExecutorService executorService;
- // TODO Make use if the query id for cachin when this is available.
- private final ConcurrentHashMap queryInfoMap = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<QueryIdentifier, QueryInfo> queryInfoMap = new ConcurrentHashMap<>();
private final String[] localDirsBase;
private final FileSystem localFs;
@@ -70,22 +69,25 @@
// Alternately - send in an explicit dag start message before any other message is processed.
// Multiple threads communicating from a single AM gets in the way of this.
- // Keeps track of completed dags. Assumes dag names are unique across AMs.
- private final Set completedDagMap = Collections.newSetFromMap(
- new ConcurrentHashMap());
+ // Keeps track of completed DAGs. QueryIdentifiers need to be unique across applications.
+ private final Set<QueryIdentifier> completedDagMap =
+ Collections.newSetFromMap(new ConcurrentHashMap<QueryIdentifier, Boolean>());
private final Lock lock = new ReentrantLock();
- private final ConcurrentMap dagSpecificLocks = new ConcurrentHashMap<>();
+ private final ConcurrentMap<QueryIdentifier, ReadWriteLock> dagSpecificLocks = new ConcurrentHashMap<>();
// Tracks various maps for dagCompletions. This is setup here since stateChange messages
// may be processed by a thread which ends up executing before a task.
- private final ConcurrentMap>
- sourceCompletionMap = new ConcurrentHashMap<>();
+ private final ConcurrentMap<QueryIdentifier, ConcurrentMap<String, SourceStateProto>>
+ sourceCompletionMap = new ConcurrentHashMap<>();
- // Tracks queryId by dagName. This can only be set when config is parsed in TezProcessor,
+ // Tracks HiveQueryId by QueryIdentifier. This can only be set when config is parsed in TezProcessor.
// all the other existing code passes queryId equal to 0 everywhere.
- private final ConcurrentHashMap dagNameToQueryId = new ConcurrentHashMap<>();
+ // If we switch the runtime and move to parsing the payload in the AM - the actual hive queryId could
+ // be sent over the wire from the AM, and will take the place of AppId+dagId in QueryIdentifier.
+ private final ConcurrentHashMap<QueryIdentifier, String> queryIdentifierToHiveQueryId =
+ new ConcurrentHashMap<>();
public QueryTracker(Configuration conf, String[] localDirsBase) {
super("QueryTracker");
@@ -107,7 +109,7 @@ public QueryTracker(Configuration conf, String[] localDirsBase) {
/**
* Register a new fragment for a specific query
- * @param queryId
+ * @param queryIdentifier
* @param appIdString
* @param dagName
* @param dagIdentifier
@@ -117,23 +119,23 @@ public QueryTracker(Configuration conf, String[] localDirsBase) {
* @param user
* @throws IOException
*/
- QueryFragmentInfo registerFragment(String queryId, String appIdString, String dagName,
+ QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appIdString, String dagName,
int dagIdentifier, String vertexName, int fragmentNumber, int attemptNumber, String user,
FragmentSpecProto fragmentSpec) throws IOException {
- ReadWriteLock dagLock = getDagLock(dagName);
+ ReadWriteLock dagLock = getDagLock(queryIdentifier);
dagLock.readLock().lock();
try {
- if (!completedDagMap.contains(dagName)) {
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ if (!completedDagMap.contains(queryIdentifier)) {
+ QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
if (queryInfo == null) {
- queryInfo = new QueryInfo(queryId, appIdString, dagName, dagIdentifier, user,
- getSourceCompletionMap(dagName), localDirsBase, localFs);
- queryInfoMap.putIfAbsent(dagName, queryInfo);
+ queryInfo = new QueryInfo(queryIdentifier, appIdString, dagName, dagIdentifier, user,
+ getSourceCompletionMap(queryIdentifier), localDirsBase, localFs);
+ queryInfoMap.putIfAbsent(queryIdentifier, queryInfo);
}
return queryInfo.registerFragment(vertexName, fragmentNumber, attemptNumber, fragmentSpec);
} else {
// Cleanup the dag lock here, since it may have been created after the query completed
- dagSpecificLocks.remove(dagName);
+ dagSpecificLocks.remove(queryIdentifier);
throw new RuntimeException(
"Dag " + dagName + " already complete. Rejecting fragment ["
+ vertexName + ", " + fragmentNumber + ", " + attemptNumber + "]");
@@ -148,12 +150,12 @@ QueryFragmentInfo registerFragment(String queryId, String appIdString, String da
* @param fragmentInfo
*/
void fragmentComplete(QueryFragmentInfo fragmentInfo) {
- String dagName = fragmentInfo.getQueryInfo().getDagName();
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ QueryIdentifier qId = fragmentInfo.getQueryInfo().getQueryIdentifier();
+ QueryInfo queryInfo = queryInfoMap.get(qId);
if (queryInfo == null) {
// Possible because a queryComplete message from the AM can come in first - KILL / SUCCESSFUL,
// before the fragmentComplete is reported
- LOG.info("Ignoring fragmentComplete message for unknown query");
+ LOG.info("Ignoring fragmentComplete message for unknown query: {}", qId);
} else {
queryInfo.unregisterFragment(fragmentInfo);
}
@@ -161,42 +163,40 @@ void fragmentComplete(QueryFragmentInfo fragmentInfo) {
/**
* Register completion for a query
- * @param queryId
- * @param dagName
+ * @param queryIdentifier
* @param deleteDelay
*/
- List queryComplete(String queryId, String dagName, long deleteDelay) {
+ List<QueryFragmentInfo> queryComplete(QueryIdentifier queryIdentifier, long deleteDelay) {
if (deleteDelay == -1) {
deleteDelay = defaultDeleteDelaySeconds;
}
- ReadWriteLock dagLock = getDagLock(dagName);
+ ReadWriteLock dagLock = getDagLock(queryIdentifier);
dagLock.writeLock().lock();
try {
- rememberCompletedDag(dagName);
- LOG.info("Processing queryComplete for dagName={} with deleteDelay={} seconds",
- dagName, deleteDelay);
- QueryInfo queryInfo = queryInfoMap.remove(dagName);
+ rememberCompletedDag(queryIdentifier);
+ LOG.info("Processing queryComplete for queryIdentifier={} with deleteDelay={} seconds", queryIdentifier,
+ deleteDelay);
+ QueryInfo queryInfo = queryInfoMap.remove(queryIdentifier);
if (queryInfo == null) {
- LOG.warn("Ignoring query complete for unknown dag: {}", dagName);
+ LOG.warn("Ignoring query complete for unknown dag: {}", queryIdentifier);
return Collections.emptyList();
}
String[] localDirs = queryInfo.getLocalDirsNoCreate();
if (localDirs != null) {
for (String localDir : localDirs) {
cleanupDir(localDir, deleteDelay);
- ShuffleHandler.get().unregisterDag(localDir, dagName, queryInfo.getDagIdentifier());
+ ShuffleHandler.get().unregisterDag(localDir, queryInfo.getAppIdString(), queryInfo.getDagIdentifier());
}
}
// Clearing this before sending a kill is OK, since canFinish will change to false.
// Ideally this should be a state machine where kills are issued to the executor,
// and the structures are cleaned up once all tasks complete. New requests, however,
// should not be allowed after a query complete is received.
- sourceCompletionMap.remove(dagName);
- String savedQueryId = dagNameToQueryId.remove(dagName);
- queryId = queryId == null ? savedQueryId : queryId;
- dagSpecificLocks.remove(dagName);
- if (queryId != null) {
- ObjectCacheFactory.removeLlapQueryCache(queryId);
+ sourceCompletionMap.remove(queryIdentifier);
+ String savedQueryId = queryIdentifierToHiveQueryId.remove(queryIdentifier);
+ dagSpecificLocks.remove(queryIdentifier);
+ if (savedQueryId != null) {
+ ObjectCacheFactory.removeLlapQueryCache(savedQueryId);
}
return queryInfo.getRegisteredFragments();
} finally {
@@ -206,24 +206,24 @@ void fragmentComplete(QueryFragmentInfo fragmentInfo) {
- public void rememberCompletedDag(String dagName) {
- if (completedDagMap.add(dagName)) {
+ public void rememberCompletedDag(QueryIdentifier queryIdentifier) {
+ if (completedDagMap.add(queryIdentifier)) {
// We will remember completed DAG for an hour to avoid execution out-of-order fragments.
- executorService.schedule(new DagMapCleanerCallable(dagName), 1, TimeUnit.HOURS);
+ executorService.schedule(new DagMapCleanerCallable(queryIdentifier), 1, TimeUnit.HOURS);
} else {
- LOG.warn("Couldn't add {} to completed dag set", dagName);
+ LOG.warn("Couldn't add {} to completed dag set", queryIdentifier);
}
}
/**
* Register an update to a source within an executing dag
- * @param dagName
+ * @param queryIdentifier
* @param sourceName
* @param sourceState
*/
- void registerSourceStateChange(String dagName, String sourceName, SourceStateProto sourceState) {
- getSourceCompletionMap(dagName).put(sourceName, sourceState);
- QueryInfo queryInfo = queryInfoMap.get(dagName);
+ void registerSourceStateChange(QueryIdentifier queryIdentifier, String sourceName, SourceStateProto sourceState) {
+ getSourceCompletionMap(queryIdentifier).put(sourceName, sourceState);
+ QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
if (queryInfo != null) {
queryInfo.sourceStateUpdated(sourceName);
} else {
@@ -233,13 +233,13 @@ void registerSourceStateChange(String dagName, String sourceName, SourceStatePro
}
- private ReadWriteLock getDagLock(String dagName) {
+ private ReadWriteLock getDagLock(QueryIdentifier queryIdentifier) {
lock.lock();
try {
- ReadWriteLock dagLock = dagSpecificLocks.get(dagName);
+ ReadWriteLock dagLock = dagSpecificLocks.get(queryIdentifier);
if (dagLock == null) {
dagLock = new ReentrantReadWriteLock();
- dagSpecificLocks.put(dagName, dagLock);
+ dagSpecificLocks.put(queryIdentifier, dagLock);
}
return dagLock;
} finally {
@@ -247,20 +247,20 @@ private ReadWriteLock getDagLock(String dagName) {
}
}
- private ConcurrentMap getSourceCompletionMap(String dagName) {
- ConcurrentMap dagMap = sourceCompletionMap.get(dagName);
+ private ConcurrentMap<String, SourceStateProto> getSourceCompletionMap(QueryIdentifier queryIdentifier) {
+ ConcurrentMap<String, SourceStateProto> dagMap = sourceCompletionMap.get(queryIdentifier);
if (dagMap == null) {
dagMap = new ConcurrentHashMap<>();
ConcurrentMap old =
- sourceCompletionMap.putIfAbsent(dagName, dagMap);
+ sourceCompletionMap.putIfAbsent(queryIdentifier, dagMap);
dagMap = (old != null) ? old : dagMap;
}
return dagMap;
}
- public void registerDagQueryId(String dagName, String queryId) {
- if (queryId == null) return;
- dagNameToQueryId.putIfAbsent(dagName, queryId);
+ public void registerDagQueryId(QueryIdentifier queryIdentifier, String hiveQueryIdString) {
+ if (hiveQueryIdString == null) return;
+ queryIdentifierToHiveQueryId.putIfAbsent(queryIdentifier, hiveQueryIdString);
}
@Override
@@ -302,15 +302,15 @@ protected Void callInternal() {
}
private class DagMapCleanerCallable extends CallableWithNdc {
- private final String dagName;
+ private final QueryIdentifier queryIdentifier;
- private DagMapCleanerCallable(String dagName) {
- this.dagName = dagName;
+ private DagMapCleanerCallable(QueryIdentifier queryIdentifier) {
+ this.queryIdentifier = queryIdentifier;
}
@Override
protected Void callInternal() {
- completedDagMap.remove(dagName);
+ completedDagMap.remove(queryIdentifier);
return null;
}
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 34aa5c9025021168c0e7062d874663c1c94246bb..57dd828873f8cd64b34b933ebd8eeed5346e703e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -33,10 +33,12 @@
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.hive.llap.daemon.FinishableStateUpdateHandler;
@@ -76,8 +78,6 @@
* new tasks. Shutting down of the task executor service can be done gracefully or immediately.
*/
public class TaskExecutorService extends AbstractService implements Scheduler {
-
-
private static final Logger LOG = LoggerFactory.getLogger(TaskExecutorService.class);
private static final boolean isInfoEnabled = LOG.isInfoEnabled();
private static final boolean isDebugEnabled = LOG.isDebugEnabled();
@@ -105,8 +105,9 @@
private final Object lock = new Object();
- public TaskExecutorService(int numExecutors, int waitQueueSize, String waitQueueComparatorClassName,
- boolean enablePreemption) {
+ public TaskExecutorService(int numExecutors, int waitQueueSize,
+ String waitQueueComparatorClassName, boolean enablePreemption,
+ ClassLoader classLoader) {
super(TaskExecutorService.class.getSimpleName());
LOG.info("TaskExecutorService is being setup with parameters: "
+ "numExecutors=" + numExecutors
@@ -114,31 +115,13 @@ public TaskExecutorService(int numExecutors, int waitQueueSize, String waitQueue
+ ", waitQueueComparatorClassName=" + waitQueueComparatorClassName
+ ", enablePreemption=" + enablePreemption);
- final Comparator waitQueueComparator;
- try {
- Class<? extends Comparator> waitQueueComparatorClazz =
- (Class<? extends Comparator>) Class.forName(
- waitQueueComparatorClassName);
- Constructor<? extends Comparator> ctor = waitQueueComparatorClazz.getConstructor(null);
- waitQueueComparator = ctor.newInstance(null);
- } catch (ClassNotFoundException e) {
- throw new RuntimeException(
- "Failed to load wait queue comparator, class=" + waitQueueComparatorClassName, e);
- } catch (NoSuchMethodException e) {
- throw new RuntimeException("Failed to find constructor for wait queue comparator, class=" +
- waitQueueComparatorClassName, e);
- } catch (InvocationTargetException | InstantiationException | IllegalAccessException e) {
- throw new RuntimeException(
- "Failed to find instantiate wait queue comparator, class=" + waitQueueComparatorClassName,
- e);
- }
+ final Comparator waitQueueComparator = createComparator(
+ waitQueueComparatorClassName);
this.waitQueue = new EvictingPriorityBlockingQueue<>(waitQueueComparator, waitQueueSize);
this.threadPoolExecutor = new ThreadPoolExecutor(numExecutors, // core pool size
numExecutors, // max pool size
- 1, TimeUnit.MINUTES,
- new SynchronousQueue(), // direct hand-off
- new ThreadFactoryBuilder().setDaemon(true).setNameFormat(TASK_EXECUTOR_THREAD_NAME_FORMAT)
- .build());
+ 1, TimeUnit.MINUTES, new SynchronousQueue(), // direct hand-off
+ new ExecutorThreadFactory(classLoader));
this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
this.preemptionQueue = new PriorityBlockingQueue<>(numExecutors,
new PreemptionQueueComparator());
@@ -146,18 +129,38 @@ public TaskExecutorService(int numExecutors, int waitQueueSize, String waitQueue
this.numSlotsAvailable = new AtomicInteger(numExecutors);
// single threaded scheduler for tasks from wait queue to executor threads
- ExecutorService wes = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat(WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT).build());
+ ExecutorService wes = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
+ .setDaemon(true).setNameFormat(WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT).build());
this.waitQueueExecutorService = MoreExecutors.listeningDecorator(wes);
ExecutorService executionCompletionExecutorServiceRaw = Executors.newFixedThreadPool(1,
new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ExecutionCompletionThread #%d")
.build());
- executionCompletionExecutorService = MoreExecutors.listeningDecorator(executionCompletionExecutorServiceRaw);
+ executionCompletionExecutorService = MoreExecutors.listeningDecorator(
+ executionCompletionExecutorServiceRaw);
ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
Futures.addCallback(future, new WaitQueueWorkerCallback());
+ }
-
+ private Comparator createComparator(
+ String waitQueueComparatorClassName) {
+ final Comparator waitQueueComparator;
+ try {
+ Class<? extends Comparator> waitQueueComparatorClazz =
+ (Class<? extends Comparator>) Class.forName(waitQueueComparatorClassName);
+ Constructor<? extends Comparator> ctor = waitQueueComparatorClazz.getConstructor(null);
+ waitQueueComparator = ctor.newInstance(null);
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(
+ "Failed to load wait queue comparator, class=" + waitQueueComparatorClassName, e);
+ } catch (NoSuchMethodException e) {
+ throw new RuntimeException("Failed to find constructor for wait queue comparator, class=" +
+ waitQueueComparatorClassName, e);
+ } catch (InvocationTargetException | InstantiationException | IllegalAccessException e) {
+ throw new RuntimeException("Failed to instantiate wait queue comparator, class="
+ + waitQueueComparatorClassName, e);
+ }
+ return waitQueueComparator;
}
@Override
@@ -320,6 +323,7 @@ public SubmissionState schedule(TaskRunnerCallable task) {
TaskWrapper taskWrapper = new TaskWrapper(task, this);
SubmissionState result;
TaskWrapper evictedTask;
+ boolean canFinish;
synchronized (lock) {
// If the queue does not have capacity, it does not throw a Rejection. Instead it will
// return the task with the lowest priority, which could be the task which is currently being processed.
@@ -327,6 +331,7 @@ public SubmissionState schedule(TaskRunnerCallable task) {
// TODO HIVE-11687 It's possible for a bunch of tasks to come in around the same time, without the
// actual executor threads picking up any work. This will lead to unnecessary rejection of tasks.
// The wait queue should be able to fit at least (waitQueue + currentFreeExecutor slots)
+ canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
evictedTask = waitQueue.offer(taskWrapper);
// null evicted task means offer accepted
@@ -366,10 +371,14 @@ public SubmissionState schedule(TaskRunnerCallable task) {
// This registration has to be done after knownTasks has been populated.
// Register for state change notifications so that the waitQueue can be re-ordered correctly
// if the fragment moves in or out of the finishable state.
- boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
- // It's safe to register outside of the lock since the stateChangeTracker ensures that updates
- // and registrations are mutually exclusive.
- taskWrapper.maybeRegisterForFinishedStateNotifications(canFinish);
+ boolean stateChanged = taskWrapper.maybeRegisterForFinishedStateNotifications(canFinish);
+ if (stateChanged) {
+ if (isDebugEnabled) {
+ LOG.debug("Finishable state of {} updated to {} during registration for state updates",
+ taskWrapper.getRequestId(), !canFinish);
+ }
+ finishableStateUpdated(taskWrapper, !canFinish);
+ }
if (isDebugEnabled) {
LOG.debug("Wait Queue: {}", waitQueue);
@@ -397,14 +406,14 @@ public void killFragment(String fragmentId) {
TaskWrapper taskWrapper = knownTasks.remove(fragmentId);
// Can be null since the task may have completed meanwhile.
if (taskWrapper != null) {
- if (taskWrapper.inWaitQueue) {
+ if (taskWrapper.isInWaitQueue()) {
if (isDebugEnabled) {
LOG.debug("Removing {} from waitQueue", fragmentId);
}
taskWrapper.setIsInWaitQueue(false);
waitQueue.remove(taskWrapper);
}
- if (taskWrapper.inPreemptionQueue) {
+ if (taskWrapper.isInPreemptionQueue()) {
if (isDebugEnabled) {
LOG.debug("Removing {} from preemptionQueue", fragmentId);
}
@@ -424,9 +433,11 @@ private void trySchedule(final TaskWrapper taskWrapper) throws RejectedExecution
synchronized (lock) {
boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
LOG.info("Attempting to execute {}", taskWrapper);
- ListenableFuture future = executorService.submit(taskWrapper.getTaskRunnerCallable());
+ ListenableFuture future = executorService.submit(
+ taskWrapper.getTaskRunnerCallable());
taskWrapper.setIsInWaitQueue(false);
- FutureCallback wrappedCallback = createInternalCompletionListener(taskWrapper);
+ FutureCallback wrappedCallback = createInternalCompletionListener(
+ taskWrapper);
// Callback on a separate thread so that when a task completes, the thread in the main queue
// is actually available for execution and will not potentially result in a RejectedExecution
Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService);
@@ -452,7 +463,8 @@ private void trySchedule(final TaskWrapper taskWrapper) throws RejectedExecution
}
private void handleScheduleAttemptedRejection(TaskWrapper taskWrapper) {
- if (enablePreemption && taskWrapper.getTaskRunnerCallable().canFinish() && !preemptionQueue.isEmpty()) {
+ if (enablePreemption && taskWrapper.getTaskRunnerCallable().canFinish()
+ && !preemptionQueue.isEmpty()) {
if (isDebugEnabled) {
LOG.debug("Preemption Queue: " + preemptionQueue);
@@ -644,9 +656,9 @@ public int compare(TaskWrapper t1, TaskWrapper t2) {
public static class TaskWrapper implements FinishableStateUpdateHandler {
private final TaskRunnerCallable taskRunnerCallable;
- private boolean inWaitQueue = false;
- private boolean inPreemptionQueue = false;
- private boolean registeredForNotifications = false;
+ private final AtomicBoolean inWaitQueue = new AtomicBoolean(false);
+ private final AtomicBoolean inPreemptionQueue = new AtomicBoolean(false);
+ private final AtomicBoolean registeredForNotifications = new AtomicBoolean(false);
private final TaskExecutorService taskExecutorService;
public TaskWrapper(TaskRunnerCallable taskRunnerCallable, TaskExecutorService taskExecutorService) {
@@ -654,18 +666,16 @@ public TaskWrapper(TaskRunnerCallable taskRunnerCallable, TaskExecutorService ta
this.taskExecutorService = taskExecutorService;
}
- // Methods are synchronized primarily for visibility.
+ // Don't invoke from within a scheduler lock
+
/**
*
* @param currentFinishableState
- * @return true if the current state is the same as the currentFinishableState. false if the state has already changed.
+ * @return true if the state has not changed from currentFinishableState, false otherwise
*/
- // Synchronized to avoid register / unregister clobbering each other.
- // Don't invoke from within a scheduler lock
- public synchronized boolean maybeRegisterForFinishedStateNotifications(
+ public boolean maybeRegisterForFinishedStateNotifications(
boolean currentFinishableState) {
- if (!registeredForNotifications) {
- registeredForNotifications = true;
+ if (!registeredForNotifications.getAndSet(true)) {
return taskRunnerCallable.getFragmentInfo()
.registerForFinishableStateUpdates(this, currentFinishableState);
} else {
@@ -673,11 +683,9 @@ public synchronized boolean maybeRegisterForFinishedStateNotifications(
}
}
- // Synchronized to avoid register / unregister clobbering each other.
// Don't invoke from within a scheduler lock
- public synchronized void maybeUnregisterForFinishedStateNotifications() {
- if (registeredForNotifications) {
- registeredForNotifications = false;
+ public void maybeUnregisterForFinishedStateNotifications() {
+ if (registeredForNotifications.getAndSet(false)) {
taskRunnerCallable.getFragmentInfo().unregisterForFinishableStateUpdates(this);
}
}
@@ -686,20 +694,20 @@ public TaskRunnerCallable getTaskRunnerCallable() {
return taskRunnerCallable;
}
- public synchronized boolean isInWaitQueue() {
- return inWaitQueue;
+ public boolean isInWaitQueue() {
+ return inWaitQueue.get();
}
- public synchronized boolean isInPreemptionQueue() {
- return inPreemptionQueue;
+ public boolean isInPreemptionQueue() {
+ return inPreemptionQueue.get();
}
- public synchronized void setIsInWaitQueue(boolean value) {
- this.inWaitQueue = value;
+ public void setIsInWaitQueue(boolean value) {
+ this.inWaitQueue.set(value);
}
- public synchronized void setIsInPreemptableQueue(boolean value) {
- this.inPreemptionQueue = value;
+ public void setIsInPreemptableQueue(boolean value) {
+ this.inPreemptionQueue.set(value);
}
public String getRequestId() {
@@ -710,9 +718,9 @@ public String getRequestId() {
public String toString() {
return "TaskWrapper{" +
"task=" + taskRunnerCallable.getRequestId() +
- ", inWaitQueue=" + inWaitQueue +
- ", inPreemptionQueue=" + inPreemptionQueue +
- ", registeredForNotifications=" + registeredForNotifications +
+ ", inWaitQueue=" + inWaitQueue.get() +
+ ", inPreemptionQueue=" + inPreemptionQueue.get() +
+ ", registeredForNotifications=" + registeredForNotifications.get() +
", canFinish=" + taskRunnerCallable.canFinish() +
", firstAttemptStartTime=" + taskRunnerCallable.getFragmentRuntimeInfo().getFirstAttemptStartTime() +
", dagStartTime=" + taskRunnerCallable.getFragmentRuntimeInfo().getDagStartTime() +
@@ -733,4 +741,24 @@ public void finishableStateUpdated(boolean finishableState) {
taskExecutorService.finishableStateUpdated(this, finishableState);
}
}
+
+ private static class ExecutorThreadFactory implements ThreadFactory {
+ private final ClassLoader classLoader;
+ private final ThreadFactory defaultFactory;
+ private final AtomicLong count = new AtomicLong(0);
+
+ public ExecutorThreadFactory(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ this.defaultFactory = Executors.defaultThreadFactory();
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread thread = defaultFactory.newThread(r);
+ thread.setName(String.format(TASK_EXECUTOR_THREAD_NAME_FORMAT, count.getAndIncrement()));
+ thread.setDaemon(true);
+ thread.setContextClassLoader(classLoader);
+ return thread;
+ }
+ }
}
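
The TaskWrapper changes above replace synchronized boolean flags with AtomicBooleans, so register and unregister become idempotent through getAndSet and the old synchronization comments are no longer needed. Below is a minimal sketch of that guard pattern; the printed side effect stands in for QueryFragmentInfo's finishable-state registration, and the real maybeRegisterForFinishedStateNotifications additionally returns whether the finishable state is still unchanged.

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the getAndSet registration guard: only the caller that flips the flag
// performs the side effect, so concurrent register/unregister calls cannot clobber
// each other even without synchronized methods.
public class RegistrationGuardSketch {
  private final AtomicBoolean registered = new AtomicBoolean(false);

  public boolean maybeRegister() {
    // getAndSet returns the previous value; only the first caller observes false.
    if (!registered.getAndSet(true)) {
      System.out.println("registered for notifications");
      return true;
    }
    return false;
  }

  public void maybeUnregister() {
    if (registered.getAndSet(false)) {
      System.out.println("unregistered from notifications");
    }
  }

  public static void main(String[] args) {
    RegistrationGuardSketch guard = new RegistrationGuardSketch();
    System.out.println(guard.maybeRegister()); // true: performs the registration
    System.out.println(guard.maybeRegister()); // false: already registered
    guard.maybeUnregister();                   // unregisters once
    guard.maybeUnregister();                   // no-op
  }
}
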
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index f03a2ff4d889752bc613ee2d5c03f730cdb62263..ede2a03389c7b3815bb6b110d0d84610f3839bc7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -51,6 +51,7 @@
import org.apache.tez.common.security.JobTokenIdentifier;
import org.apache.tez.common.security.TokenCache;
import org.apache.tez.dag.api.TezConstants;
+import org.apache.tez.hadoop.shim.HadoopShim;
import org.apache.tez.runtime.api.ExecutionContext;
import org.apache.tez.runtime.api.impl.TaskSpec;
import org.apache.tez.runtime.common.objectregistry.ObjectRegistryImpl;
@@ -100,6 +101,7 @@
private final LlapDaemonExecutorMetrics metrics;
private final String requestId;
private final String queryId;
+ private final HadoopShim tezHadoopShim;
private boolean shouldRunTask = true;
final Stopwatch runtimeWatch = new Stopwatch();
final Stopwatch killtimerWatch = new Stopwatch();
@@ -115,7 +117,8 @@ public TaskRunnerCallable(SubmitWorkRequestProto request, QueryFragmentInfo frag
long memoryAvailable, AMReporter amReporter,
ConfParams confParams, LlapDaemonExecutorMetrics metrics,
KilledTaskHandler killedTaskHandler,
- FragmentCompletionHandler fragmentCompleteHandler) {
+ FragmentCompletionHandler fragmentCompleteHandler,
+ HadoopShim tezHadoopShim) {
this.request = request;
this.fragmentInfo = fragmentInfo;
this.conf = conf;
@@ -131,7 +134,7 @@ public TaskRunnerCallable(SubmitWorkRequestProto request, QueryFragmentInfo frag
// Register with the AMReporter when the callable is setup. Unregister once it starts running.
if (jobToken != null) {
this.amReporter.registerTask(request.getAmHost(), request.getAmPort(),
- request.getUser(), jobToken, null, request.getFragmentSpec().getDagName());
+ request.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
}
this.metrics = metrics;
this.requestId = request.getFragmentSpec().getFragmentIdentifierString();
@@ -139,6 +142,7 @@ public TaskRunnerCallable(SubmitWorkRequestProto request, QueryFragmentInfo frag
this.queryId = request.getFragmentSpec().getDagName();
this.killedTaskHandler = killedTaskHandler;
this.fragmentCompletionHanler = fragmentCompleteHandler;
+ this.tezHadoopShim = tezHadoopShim;
}
public long getStartTime() {
@@ -216,7 +220,7 @@ public LlapTaskUmbilicalProtocol run() throws Exception {
serviceConsumerMetadata, envMap, startedInputsMap, taskReporter, executor,
objectRegistry,
pid,
- executionContext, memoryAvailable, false);
+ executionContext, memoryAvailable, false, tezHadoopShim);
}
}
if (taskRunner == null) {
@@ -297,9 +301,8 @@ public void killTask() {
*/
public void reportTaskKilled() {
killedTaskHandler
- .taskKilled(request.getAmHost(), request.getAmPort(), request.getUser(), jobToken, null,
- taskSpec.getDAGName(),
- taskSpec.getTaskAttemptID());
+ .taskKilled(request.getAmHost(), request.getAmPort(), request.getUser(), jobToken,
+ fragmentInfo.getQueryInfo().getQueryIdentifier(), taskSpec.getTaskAttemptID());
}
public boolean canFinish() {
@@ -428,6 +431,7 @@ public void onSuccess(TaskRunner2Result result) {
HistoryLogger
.logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
executionContext.getHostName(), request.getFragmentSpec().getDagName(),
+ fragmentInfo.getQueryInfo().getDagIdentifier(),
request.getFragmentSpec().getVertexName(),
request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
@@ -445,6 +449,7 @@ public void onFailure(Throwable t) {
HistoryLogger
.logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
executionContext.getHostName(), request.getFragmentSpec().getDagName(),
+ fragmentInfo.getQueryInfo().getDagIdentifier(),
request.getFragmentSpec().getVertexName(),
request.getFragmentSpec().getFragmentNumber(),
request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 3ddfc294e30fc8bf84e50aff68e9604e2a4dd359..1f1aac7bf5b8465f9ff7658bf35beaec5076a873 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -513,7 +513,7 @@ private static String getDbAndTableName(Path path) {
private void validateFileMetadata() throws IOException {
if (fileMetadata.getCompressionKind() == CompressionKind.NONE) return;
int bufferSize = fileMetadata.getCompressionBufferSize();
- int minAllocSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
+ long minAllocSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
if (bufferSize < minAllocSize) {
LOG.warn("ORC compression buffer size (" + bufferSize + ") is smaller than LLAP low-level "
+ "cache minimum allocation size (" + minAllocSize + "). Decrease the value for "
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
index 7428a6a0d5c30dbc133d9bf7e4a22e8f2fde0c81..f61d62f011554429b68f06686e9d5dd8c57e68bb 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/Converters.java
@@ -262,5 +262,4 @@ public static SourceStateProto fromVertexState(VertexState state) {
throw new RuntimeException("Unexpected state: " + state);
}
}
-
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index 5c370eef18e3037ebdc8abee4497cca04e461d1c..eb6384f4c0cf8b3ad732d62e063c313542f18e7d 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
@@ -62,7 +63,6 @@
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.tez.common.TezTaskUmbilicalProtocol;
import org.apache.tez.common.security.JobTokenSecretManager;
-import org.apache.tez.dag.api.TaskCommunicatorContext;
import org.apache.tez.dag.api.TezConfiguration;
import org.apache.tez.dag.api.TezException;
import org.apache.tez.dag.api.TezUncheckedException;
@@ -74,6 +74,7 @@
import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
import org.apache.tez.serviceplugins.api.ContainerEndReason;
import org.apache.tez.serviceplugins.api.TaskAttemptEndReason;
+import org.apache.tez.serviceplugins.api.TaskCommunicatorContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -85,7 +86,8 @@
private static final boolean isDebugEnabed = LOG.isDebugEnabled();
private final SubmitWorkRequestProto BASE_SUBMIT_WORK_REQUEST;
- private final ConcurrentMap<String, ByteBuffer> credentialMap;
+
+ private final ConcurrentMap<QueryIdentifierProto, ByteBuffer> credentialMap;
// Tracks containerIds and taskAttemptIds, so can be kept independent of the running DAG.
// When DAG specific cleanup happens, it'll be better to link this to a DAG though.
@@ -104,7 +106,8 @@
private final ConcurrentMap pingedNodeMap = new ConcurrentHashMap<>();
- private volatile String currentDagName;
+ private volatile int currentDagId;
+ private volatile QueryIdentifierProto currentQueryIdentifierProto;
public LlapTaskCommunicator(
TaskCommunicatorContext taskCommunicatorContext) {
@@ -226,8 +229,9 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
int priority) {
super.registerRunningTaskAttempt(containerId, taskSpec, additionalResources, credentials,
credentialsChanged, priority);
- if (taskSpec.getDAGName() != currentDagName) {
- resetCurrentDag(taskSpec.getDAGName());
+ int dagId = taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId();
+ if (currentQueryIdentifierProto == null || (dagId != currentQueryIdentifierProto.getDagIdentifier())) {
+ resetCurrentDag(dagId);
}
@@ -251,7 +255,7 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
nodesForQuery.add(nodeId);
sourceStateTracker.registerTaskForStateUpdates(host, port, taskSpec.getInputs());
- FragmentRuntimeInfo fragmentRuntimeInfo = sourceStateTracker.getFragmentRuntimeInfo(taskSpec.getDAGName(),
+ FragmentRuntimeInfo fragmentRuntimeInfo = sourceStateTracker.getFragmentRuntimeInfo(
taskSpec.getVertexName(), taskSpec.getTaskAttemptID().getTaskID().getId(), priority);
SubmitWorkRequestProto requestProto;
@@ -349,7 +353,7 @@ private void sendTaskTerminated(final TezTaskAttemptID taskAttemptId,
// NodeId can be null if the task gets unregistered due to failure / being killed by the daemon itself
if (nodeId != null) {
TerminateFragmentRequestProto request =
- TerminateFragmentRequestProto.newBuilder().setDagName(currentDagName)
+ TerminateFragmentRequestProto.newBuilder().setQueryIdentifier(currentQueryIdentifierProto)
.setFragmentIdentifierString(taskAttemptId.toString()).build();
communicator.sendTerminateFragment(request, nodeId.getHostname(), nodeId.getPort(),
new LlapDaemonProtocolClientProxy.ExecuteRequestCallback() {
@@ -370,12 +374,16 @@ public void indicateError(Throwable t) {
}
}
+
+
+
@Override
- public void dagComplete(final String dagName) {
- QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder().setDagName(
- dagName).setDeleteDelay(deleteDelayOnDagComplete).build();
+ public void dagComplete(final int dagIdentifier) {
+ QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder()
+ .setQueryIdentifier(constructQueryIdentifierProto(dagIdentifier))
+ .setDeleteDelay(deleteDelayOnDagComplete).build();
for (final LlapNodeId llapNodeId : nodesForQuery) {
- LOG.info("Sending dagComplete message for {}, to {}", dagName, llapNodeId);
+ LOG.info("Sending dagComplete message for {}, to {}", dagIdentifier, llapNodeId);
communicator.sendQueryComplete(request, llapNodeId.getHostname(), llapNodeId.getPort(),
new LlapDaemonProtocolClientProxy.ExecuteRequestCallback() {
@Override
@@ -384,7 +392,7 @@ public void setResponse(LlapDaemonProtocolProtos.QueryCompleteResponseProto resp
@Override
public void indicateError(Throwable t) {
- LOG.warn("Failed to indicate dag complete dagId={} to node {}", dagName, llapNodeId);
+ LOG.warn("Failed to indicate dag complete dagId={} to node {}", dagIdentifier, llapNodeId);
}
});
}
@@ -495,12 +503,12 @@ void nodePinged(String hostname, int port) {
}
}
- private void resetCurrentDag(String newDagName) {
+ private void resetCurrentDag(int newDagId) {
// Working on the assumption that a single DAG runs at a time per AM.
- currentDagName = newDagName;
- sourceStateTracker.resetState(newDagName);
+ currentQueryIdentifierProto = constructQueryIdentifierProto(newDagId);
+ sourceStateTracker.resetState(newDagId);
nodesForQuery.clear();
- LOG.info("CurrentDag set to: " + newDagName);
+ LOG.info("CurrentDagId set to: " + newDagId + ", name=" + getContext().getCurrentDagName());
// TODO Is it possible for heartbeats to come in from lost tasks - those should be told to die, which
// is likely already happening.
}
@@ -518,10 +526,12 @@ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerI
// Credentials can change across DAGs. Ideally construct only once per DAG.
taskCredentials.addAll(getContext().getCredentials());
- ByteBuffer credentialsBinary = credentialMap.get(taskSpec.getDAGName());
+ Preconditions.checkState(currentQueryIdentifierProto.getDagIdentifier() ==
+ taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId());
+ ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto);
if (credentialsBinary == null) {
credentialsBinary = serializeCredentials(getContext().getCredentials());
- credentialMap.putIfAbsent(taskSpec.getDAGName(), credentialsBinary.duplicate());
+ credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate());
} else {
credentialsBinary = credentialsBinary.duplicate();
}
@@ -736,4 +746,10 @@ void unregisterContainer(ContainerId containerId) {
}
}
+
+ private QueryIdentifierProto constructQueryIdentifierProto(int dagIdentifier) {
+ return QueryIdentifierProto.newBuilder()
+ .setAppIdentifier(getContext().getCurrentAppIdentifier()).setDagIdentifier(dagIdentifier)
+ .build();
+ }
}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
index 066fae5f513e757c3b22b904541a9eb6b494068e..fded9bff32c73732387eff0c7260be3e2f078870 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/tezplugins/helpers/SourceStateTracker.java
@@ -24,6 +24,8 @@
import java.util.Set;
import org.apache.commons.lang3.mutable.MutableInt;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;
+import org.apache.tez.serviceplugins.api.TaskCommunicatorContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.llap.LlapNodeId;
@@ -31,7 +33,6 @@
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
import org.apache.hadoop.hive.llap.tezplugins.Converters;
import org.apache.hadoop.hive.llap.tezplugins.LlapTaskCommunicator;
-import org.apache.tez.dag.api.TaskCommunicatorContext;
import org.apache.tez.dag.api.event.VertexState;
import org.apache.tez.mapreduce.input.MRInput;
import org.apache.tez.mapreduce.input.MRInputLegacy;
@@ -45,28 +46,33 @@
private final TaskCommunicatorContext taskCommunicatorContext;
private final LlapTaskCommunicator taskCommunicator;
+ private final QueryIdentifierProto BASE_QUERY_IDENTIFIER;
+
// Tracks vertices for which notifications have been registered
private final Set notificationRegisteredVertices = new HashSet<>();
private final Map sourceInfoMap = new HashMap<>();
private final Map nodeInfoMap = new HashMap<>();
- private volatile String currentDagName;
+ private volatile QueryIdentifierProto currentQueryIdentifier;
public SourceStateTracker(TaskCommunicatorContext taskCommunicatorContext,
LlapTaskCommunicator taskCommunicator) {
this.taskCommunicatorContext = taskCommunicatorContext;
this.taskCommunicator = taskCommunicator;
+ BASE_QUERY_IDENTIFIER = QueryIdentifierProto.newBuilder()
+ .setAppIdentifier(taskCommunicatorContext.getCurrentAppIdentifier()).build();
}
/**
* To be invoked after each DAG completes.
*/
- public synchronized void resetState(String newDagName) {
+ public synchronized void resetState(int newDagId) {
sourceInfoMap.clear();
nodeInfoMap.clear();
notificationRegisteredVertices.clear();
- this.currentDagName = newDagName;
+ this.currentQueryIdentifier =
+ QueryIdentifierProto.newBuilder(BASE_QUERY_IDENTIFIER).setDagIdentifier(newDagId).build();
}
/**
@@ -139,16 +145,16 @@ public synchronized void sourceStateUpdated(String sourceName, VertexState sourc
}
+ // Assumes serialized DAGs within an AM, and a reset of structures after each DAG completes.
/**
* Constructs FragmentRuntimeInfo for scheduling within LLAP daemons.
* Also caches state based on state updates.
- * @param dagName
* @param vertexName
* @param fragmentNumber
* @param priority
* @return
*/
- public synchronized FragmentRuntimeInfo getFragmentRuntimeInfo(String dagName, String vertexName, int fragmentNumber,
+ public synchronized FragmentRuntimeInfo getFragmentRuntimeInfo(String vertexName, int fragmentNumber,
int priority) {
FragmentRuntimeInfo.Builder builder = FragmentRuntimeInfo.newBuilder();
maybeRegisterForVertexUpdates(vertexName);
@@ -282,9 +288,8 @@ private boolean isSourceOfInterest(InputSpec inputSpec) {
void sendStateUpdateToNode(LlapNodeId nodeId, String sourceName, VertexState state) {
taskCommunicator.sendStateUpdate(nodeId.getHostname(), nodeId.getPort(),
- SourceStateUpdatedRequestProto.newBuilder().setDagName(currentDagName).setSrcName(
- sourceName)
- .setState(Converters.fromVertexState(state)).build());
+ SourceStateUpdatedRequestProto.newBuilder().setQueryIdentifier(currentQueryIdentifier)
+ .setSrcName(sourceName).setState(Converters.fromVertexState(state)).build());
}
diff --git a/llap-server/src/main/resources/package.py b/llap-server/src/main/resources/package.py
index 5620483e4706051e6a17cbb4ad9c0bdb0b41b09b..0204b7bbf1fbb9080a9435d393c9429d4916d89e 100644
--- a/llap-server/src/main/resources/package.py
+++ b/llap-server/src/main/resources/package.py
@@ -60,17 +60,34 @@ def main(args):
parser.add_argument("--args", default="")
parser.add_argument("--name", default="llap0")
parser.add_argument("--loglevel", default="INFO")
- parser.add_argument("--chaosmonkey", type=int, default="0")
+ parser.add_argument("--chaosmonkey", type=int, default=0)
+ parser.add_argument("--slider-keytab-dir", default="")
+ parser.add_argument("--slider-keytab", default="")
+ parser.add_argument("--slider-principal", default="")
+ parser.add_argument("--slider-default-keytab", dest='slider_default_keytab', action='store_true')
+ parser.set_defaults(slider_default_keytab=False)
# Unneeded here for now: parser.add_argument("--hiveconf", action='append')
#parser.add_argument("--size") parser.add_argument("--xmx") parser.add_argument("--cache") parser.add_argument("--executors")
(args, unknown_args) = parser.parse_known_args(args)
input = args.input
output = args.output
+ slider_keytab_dir = args.slider_keytab_dir
+ slider_keytab = args.slider_keytab
+ slider_principal = args.slider_principal
+ # set the defaults only if the defaults are enabled
+ if args.slider_default_keytab:
+ if not slider_keytab_dir:
+ slider_keytab_dir = ".slider/keytabs/llap"
+ if not slider_keytab:
+ slider_keytab = "llap.keytab"
+ if not slider_principal:
+ slider_principal = "llap@EXAMPLE.COM"
if not input:
print "Cannot find input files"
sys.exit(1)
return
config = json_parse(open(join(input, "config.json")).read())
+ java_home = config["java.home"]
resource = LlapResource(config)
# 5% container failure every monkey_interval seconds
monkey_percentage = 5 # 5%
@@ -82,13 +99,16 @@ def main(args):
"container.mb" : resource.container_size,
"container.cores" : resource.container_cores,
"hadoop_home" : os.getenv("HADOOP_HOME"),
- "java_home" : os.getenv("JAVA_HOME"),
+ "java_home" : java_home,
"name" : args.name,
"daemon_args" : args.args,
"daemon_loglevel" : args.loglevel,
"monkey_interval" : args.chaosmonkey,
"monkey_percentage" : monkey_percentage,
- "monkey_enabled" : args.chaosmonkey > 0
+ "monkey_enabled" : args.chaosmonkey > 0,
+ "slider_keytab_dir" : slider_keytab_dir,
+ "slider_keytab" : slider_keytab,
+ "slider_principal" : slider_principal
}
if not exists(output):
@@ -141,3 +161,4 @@ def main(args):
if __name__ == "__main__":
main(sys.argv[1:])
+# vim: ai ts=4 noet sw=4 ft=python
diff --git a/llap-server/src/main/resources/templates.py b/llap-server/src/main/resources/templates.py
index 8c4e4c7d1bca8f5e91597c8b9476e1f774494512..277a49cb170cc7cc04718e489e2756a76faddde5 100644
--- a/llap-server/src/main/resources/templates.py
+++ b/llap-server/src/main/resources/templates.py
@@ -88,7 +88,10 @@
},
"components": {
"slider-appmaster": {
- "jvm.heapsize": "1024M"
+ "jvm.heapsize": "1024M",
+ "slider.hdfs.keytab.dir": "%(slider_keytab_dir)s",
+ "slider.am.login.keytab.name": "%(slider_keytab)s",
+ "slider.keytab.principal.name": "%(slider_principal)s"
}
}
}
@@ -107,18 +110,21 @@
"LLAP": {
"yarn.role.priority": "1",
"yarn.component.instances": "%(instances)d",
- "yarn.memory": "%(container.mb)d"
+ "yarn.memory": "%(container.mb)d",
+ "yarn.component.placement.policy" : "4"
}
}
}
"""
+# placement policy "4" is a bit-mask
+# only bit set is Slider PlacementPolicy.ANTI_AFFINITY_REQUIRED(4)
runner = """
#!/bin/bash -e
BASEDIR=$(dirname $0)
slider stop %(name)s
-slider destroy %(name)s --force
+slider destroy %(name)s --force || slider destroy %(name)s
slider install-package --name LLAP --package $BASEDIR/llap-%(version)s.zip --replacepkg
slider create %(name)s --resources $BASEDIR/resources.json --template $BASEDIR/appConfig.json
"""
diff --git a/llap-server/src/protobuf/LlapDaemonProtocol.proto b/llap-server/src/protobuf/LlapDaemonProtocol.proto
index a2d944fc2be3ef6aa1024fdba3d64fae13b44b54..944c96c6ec5cad856d0a7d987b84779f1f2da9f1 100644
--- a/llap-server/src/protobuf/LlapDaemonProtocol.proto
+++ b/llap-server/src/protobuf/LlapDaemonProtocol.proto
@@ -50,6 +50,7 @@ message GroupInputSpecProto {
message FragmentSpecProto {
optional string fragment_identifier_string = 1;
optional string dag_name = 2;
+ optional int32 dag_id = 11;
optional string vertex_name = 3;
optional EntityDescriptorProto processor_descriptor = 4;
repeated IOSpecProto input_specs = 5;
@@ -74,6 +75,11 @@ enum SourceStateProto {
S_RUNNING = 2;
}
+message QueryIdentifierProto {
+ optional string app_identifier = 1;
+ optional int32 dag_identifier = 2;
+}
+
message SubmitWorkRequestProto {
optional string container_id_string = 1;
optional string am_host = 2;
@@ -98,7 +104,7 @@ message SubmitWorkResponseProto {
}
message SourceStateUpdatedRequestProto {
- optional string dag_name = 1;
+ optional QueryIdentifierProto query_identifier = 1;
optional string src_name = 2;
optional SourceStateProto state = 3;
}
@@ -108,17 +114,16 @@ message SourceStateUpdatedResponseProto {
message QueryCompleteRequestProto {
optional string query_id = 1;
- optional string dag_name = 2;
- optional int64 delete_delay = 3 [default = 0];
+ optional QueryIdentifierProto query_identifier = 2;
+ optional int64 delete_delay = 4 [default = 0];
}
message QueryCompleteResponseProto {
}
message TerminateFragmentRequestProto {
- optional string query_id = 1;
- optional string dag_name = 2;
- optional string fragment_identifier_string = 7;
+ optional QueryIdentifierProto query_identifier = 1;
+ optional string fragment_identifier_string = 2;
}
message TerminateFragmentResponseProto {
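
The new QueryIdentifierProto replaces the dag_name strings previously carried in these request messages. As a rough usage sketch, the Java below mirrors constructQueryIdentifierProto() and dagComplete() from the LlapTaskCommunicator changes earlier in this patch; it assumes the classes regenerated from this .proto are on the classpath, and the identifier values are made up for illustration.

import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto;

// Builds a QueryIdentifierProto and embeds it in a QueryCompleteRequestProto,
// the way LlapTaskCommunicator does when a DAG completes.
public class QueryIdentifierProtoSketch {
  public static void main(String[] args) {
    QueryIdentifierProto queryIdentifier = QueryIdentifierProto.newBuilder()
        .setAppIdentifier("application_1234567890123_0001") // illustrative app id
        .setDagIdentifier(1)
        .build();

    QueryCompleteRequestProto request = QueryCompleteRequestProto.newBuilder()
        .setQueryIdentifier(queryIdentifier)
        .setDeleteDelay(0L)
        .build();

    System.out.println(request);
  }
}
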
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
index fc014a7974fb0a073c08f853b83b700a131bc4d0..7b04103a47b534b4c4599eb0c854580152fd1535 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
@@ -78,9 +78,8 @@ public void testVariableSizeMultiAllocs() throws Exception {
@Test
public void testSameSizes() throws Exception {
int min = 3, max = 8, maxAlloc = 1 << max;
- Configuration conf = createConf(1 << min, maxAlloc, maxAlloc, maxAlloc);
- BuddyAllocator a = new BuddyAllocator(conf, new DummyMemoryManager(),
- LlapDaemonCacheMetrics.create("test", "1"));
+ BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc, maxAlloc,
+ new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
for (int i = max; i >= min; --i) {
allocSameSize(a, 1 << (max - i), i);
}
@@ -89,18 +88,16 @@ public void testSameSizes() throws Exception {
@Test
public void testMultipleArenas() throws Exception {
int max = 8, maxAlloc = 1 << max, allocLog2 = max - 1, arenaCount = 5;
- Configuration conf = createConf(1 << 3, maxAlloc, maxAlloc, maxAlloc * arenaCount);
- BuddyAllocator a = new BuddyAllocator(conf, new DummyMemoryManager(),
- LlapDaemonCacheMetrics.create("test", "1"));
+ BuddyAllocator a = new BuddyAllocator(false, 1 << 3, maxAlloc, maxAlloc, maxAlloc * arenaCount,
+ new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
allocSameSize(a, arenaCount * 2, allocLog2);
}
@Test
public void testMTT() {
final int min = 3, max = 8, maxAlloc = 1 << max, allocsPerSize = 3;
- Configuration conf = createConf(1 << min, maxAlloc, maxAlloc * 8, maxAlloc * 24);
- final BuddyAllocator a = new BuddyAllocator(conf, new DummyMemoryManager(),
- LlapDaemonCacheMetrics.create("test", "1"));
+ final BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc * 8,
+ maxAlloc * 24, new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
ExecutorService executor = Executors.newFixedThreadPool(3);
final CountDownLatch cdlIn = new CountDownLatch(3), cdlOut = new CountDownLatch(1);
FutureTask upTask = new FutureTask(new Callable() {
@@ -143,8 +140,8 @@ public Void call() throws Exception {
@Test
public void testMTTArenas() {
final int min = 3, max = 4, maxAlloc = 1 << max, minAllocCount = 2048, threadCount = 4;
- Configuration conf = createConf(1 << min, maxAlloc, maxAlloc, (1 << min) * minAllocCount);
- final BuddyAllocator a = new BuddyAllocator(conf, new DummyMemoryManager(),
+ final BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc,
+ (1 << min) * minAllocCount, new DummyMemoryManager(),
LlapDaemonCacheMetrics.create("test", "1"));
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
final CountDownLatch cdlIn = new CountDownLatch(threadCount), cdlOut = new CountDownLatch(1);
@@ -183,8 +180,8 @@ private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cd
private void testVariableSizeInternal(
int allocCount, int arenaSizeMult, int arenaCount) throws Exception {
int min = 3, max = 8, maxAlloc = 1 << max, arenaSize = maxAlloc * arenaSizeMult;
- Configuration conf = createConf(1 << min, maxAlloc, arenaSize, arenaSize * arenaCount);
- BuddyAllocator a = new BuddyAllocator(conf, new DummyMemoryManager(),
+ BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, arenaSize,
+ arenaSize * arenaCount, new DummyMemoryManager(),
LlapDaemonCacheMetrics.create("test", "1"));
allocateUp(a, min, max, allocCount, true);
allocateDown(a, min, max, allocCount, true);
@@ -279,13 +276,4 @@ private void deallocBuffers(
a.deallocate(mem);
}
}
-
- private Configuration createConf(int min, int max, int arena, int total) {
- Configuration conf = new Configuration();
- conf.setInt(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname, min);
- conf.setInt(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname, max);
- conf.setInt(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname, total/arena);
- conf.setLong(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, total);
- return conf;
- }
}
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
index 46e95473186c49155bc6decb136ac05b9afcfb3d..616c040e06b1ce4f5c83d74842d0447cb29f9fa5 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
@@ -17,30 +17,34 @@
*/
package org.apache.hadoop.hive.llap.cache;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-import org.mockito.stubbing.Answer;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.mockito.invocation.InvocationOnMock;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
import org.junit.Assume;
import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestLowLevelLrfuCachePolicy {
private static final Logger LOG = LoggerFactory.getLogger(TestLowLevelLrfuCachePolicy.class);
@@ -49,13 +53,15 @@
public void testRegression_HIVE_12178() throws Exception {
LOG.info("Testing wrong list status after eviction");
EvictionTracker et = new EvictionTracker();
- int memSize = 2, lambda = 1; // Set lambda to 1 so the heap size becomes 1 (LRU).
- Configuration conf = createConf(1, memSize, (double)lambda);
- final LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(conf);
+ int memSize = 2;
+ Configuration conf = new Configuration();
+ // Set lambda to 1 so the heap size becomes 1 (LRU).
+ conf.setDouble(HiveConf.ConfVars.LLAP_LRFU_LAMBDA.varname, 1.0f);
+ final LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(1, memSize, conf);
Field f = LowLevelLrfuCachePolicy.class.getDeclaredField("listLock");
f.setAccessible(true);
ReentrantLock listLock = (ReentrantLock)f.get(lrfu);
- LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(conf, lrfu,
+ LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(memSize, lrfu,
LlapDaemonCacheMetrics.create("test", "1"));
lrfu.setEvictionListener(et);
final LlapDataBuffer buffer1 = LowLevelCacheImpl.allocateFake();
@@ -118,12 +124,12 @@ public void testLfuExtreme() {
int heapSize = 4;
LOG.info("Testing lambda 0 (LFU)");
Random rdm = new Random(1234);
- Configuration conf = createConf(1, heapSize);
+ Configuration conf = new Configuration();
ArrayList inserted = new ArrayList(heapSize);
conf.setFloat(HiveConf.ConfVars.LLAP_LRFU_LAMBDA.varname, 0.0f);
EvictionTracker et = new EvictionTracker();
- LowLevelLrfuCachePolicy lfu = new LowLevelLrfuCachePolicy(conf);
- LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(conf, lfu,
+ LowLevelLrfuCachePolicy lfu = new LowLevelLrfuCachePolicy(1, heapSize, conf);
+ LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(heapSize, lfu,
LlapDaemonCacheMetrics.create("test", "1"));
lfu.setEvictionListener(et);
for (int i = 0; i < heapSize; ++i) {
@@ -143,31 +149,17 @@ public void testLfuExtreme() {
verifyOrder(mm, lfu, et, inserted, null);
}
- private Configuration createConf(int min, int heapSize, Double lambda) {
- Configuration conf = new Configuration();
- conf.setInt(HiveConf.ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname, min);
- conf.setInt(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, heapSize);
- if (lambda != null) {
- conf.setDouble(HiveConf.ConfVars.LLAP_LRFU_LAMBDA.varname, lambda.doubleValue());
- }
- return conf;
- }
-
- private Configuration createConf(int min, int heapSize) {
- return createConf(min, heapSize, null);
- }
-
@Test
public void testLruExtreme() {
int heapSize = 4;
LOG.info("Testing lambda 1 (LRU)");
Random rdm = new Random(1234);
- Configuration conf = createConf(1, heapSize);
+ Configuration conf = new Configuration();
ArrayList inserted = new ArrayList(heapSize);
conf.setFloat(HiveConf.ConfVars.LLAP_LRFU_LAMBDA.varname, 1.0f);
EvictionTracker et = new EvictionTracker();
- LowLevelLrfuCachePolicy lru = new LowLevelLrfuCachePolicy(conf);
- LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(conf, lru,
+ LowLevelLrfuCachePolicy lru = new LowLevelLrfuCachePolicy(1, heapSize, conf);
+ LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(heapSize, lru,
LlapDaemonCacheMetrics.create("test", "1"));
lru.setEvictionListener(et);
for (int i = 0; i < heapSize; ++i) {
@@ -192,9 +184,9 @@ public void testDeadlockResolution() {
LOG.info("Testing deadlock resolution");
ArrayList inserted = new ArrayList(heapSize);
EvictionTracker et = new EvictionTracker();
- Configuration conf = createConf(1, heapSize);
- LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(conf);
- LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(conf, lrfu,
+ Configuration conf = new Configuration();
+ LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(1, heapSize, conf);
+ LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(heapSize, lrfu,
LlapDaemonCacheMetrics.create("test", "1"));
lrfu.setEvictionListener(et);
for (int i = 0; i < heapSize; ++i) {
@@ -267,12 +259,12 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
private void testHeapSize(int heapSize) {
LOG.info("Testing heap size " + heapSize);
Random rdm = new Random(1234);
- Configuration conf = createConf(1, heapSize);
+ Configuration conf = new Configuration();
conf.setFloat(HiveConf.ConfVars.LLAP_LRFU_LAMBDA.varname, 0.2f); // very small heap, 14 elements
EvictionTracker et = new EvictionTracker();
- LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(conf);
+ LowLevelLrfuCachePolicy lrfu = new LowLevelLrfuCachePolicy(1, heapSize, conf);
MetricsMock m = createMetricsMock();
- LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(conf, lrfu, m.metricsMock);
+ LowLevelCacheMemoryManager mm = new LowLevelCacheMemoryManager(heapSize, lrfu, m.metricsMock);
lrfu.setEvictionListener(et);
// Insert the number of elements plus 2, to trigger 2 evictions.
int toEvict = 2;
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
index 38af07ec72473f509c11210f18bc7811cc5f90d1..4d05c3535791deda2cc9302f53b2fd4e6e7be6b6 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
@@ -16,6 +16,7 @@
import static org.mockito.Mockito.mock;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
@@ -25,6 +26,7 @@
import org.apache.hadoop.hive.llap.daemon.FragmentCompletionHandler;
import org.apache.hadoop.hive.llap.daemon.KilledTaskHandler;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
import org.apache.hadoop.security.Credentials;
@@ -33,6 +35,7 @@
import org.apache.tez.dag.records.TezTaskAttemptID;
import org.apache.tez.dag.records.TezTaskID;
import org.apache.tez.dag.records.TezVertexID;
+import org.apache.tez.hadoop.shim.DefaultHadoopShim;
import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
import org.apache.tez.runtime.task.EndReason;
import org.apache.tez.runtime.task.TaskRunner2Result;
@@ -48,18 +51,36 @@ public static MockRequest createMockRequest(int fragmentNum, int parallelism, lo
SubmitWorkRequestProto
requestProto = createSubmitWorkRequestProto(fragmentNum, parallelism,
startTime);
- MockRequest mockRequest = new MockRequest(requestProto, canFinish, workTime);
+ QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(requestProto.getFragmentSpec());
+ MockRequest mockRequest = new MockRequest(requestProto, queryFragmentInfo, canFinish, workTime);
return mockRequest;
}
public static TaskExecutorService.TaskWrapper createTaskWrapper(
SubmitWorkRequestProto request, boolean canFinish, int workTime) {
- MockRequest mockRequest = new MockRequest(request, canFinish, workTime);
+ QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(request.getFragmentSpec());
+ MockRequest mockRequest = new MockRequest(request, queryFragmentInfo, canFinish, workTime);
TaskExecutorService.TaskWrapper
taskWrapper = new TaskExecutorService.TaskWrapper(mockRequest, null);
return taskWrapper;
}
+ public static QueryFragmentInfo createQueryFragmentInfo(FragmentSpecProto fragmentSpecProto) {
+ QueryInfo queryInfo = createQueryInfo();
+ QueryFragmentInfo fragmentInfo =
+ new QueryFragmentInfo(queryInfo, "fakeVertexName", fragmentSpecProto.getFragmentNumber(), 0,
+ fragmentSpecProto);
+ return fragmentInfo;
+ }
+
+ public static QueryInfo createQueryInfo() {
+ QueryIdentifier queryIdentifier = new QueryIdentifier("fake_app_id_string", 1);
+ QueryInfo queryInfo =
+ new QueryInfo(queryIdentifier, "fake_app_id_string", "fake_dag_name", 1, "fakeUser",
+ new ConcurrentHashMap(),
+ new String[0], null);
+ return queryInfo;
+ }
public static SubmitWorkRequestProto createSubmitWorkRequestProto(
int fragmentNumber, int selfAndUpstreamParallelism,
@@ -80,7 +101,7 @@ public static SubmitWorkRequestProto createSubmitWorkRequestProto(
return SubmitWorkRequestProto
.newBuilder()
.setFragmentSpec(
- LlapDaemonProtocolProtos.FragmentSpecProto
+ FragmentSpecProto
.newBuilder()
.setAttemptNumber(0)
.setDagName("MockDag")
@@ -119,13 +140,13 @@ public static SubmitWorkRequestProto createSubmitWorkRequestProto(
private boolean shouldSleep = true;
private final Condition finishedCondition = lock.newCondition();
- public MockRequest(SubmitWorkRequestProto requestProto,
+ public MockRequest(SubmitWorkRequestProto requestProto, QueryFragmentInfo fragmentInfo,
boolean canFinish, long workTime) {
- super(requestProto, mock(QueryFragmentInfo.class), new Configuration(),
+ super(requestProto, fragmentInfo, new Configuration(),
new ExecutionContextImpl("localhost"), null, new Credentials(), 0, null, null, mock(
LlapDaemonExecutorMetrics.class),
mock(KilledTaskHandler.class), mock(
- FragmentCompletionHandler.class));
+ FragmentCompletionHandler.class), new DefaultHadoopShim());
this.workTime = workTime;
this.canFinish = canFinish;
}
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
new file mode 100644
index 0000000000000000000000000000000000000000..39a3865cb8f4a3d4e09ffb42d47c815c248be416
--- /dev/null
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestQueryIdentifier.java
@@ -0,0 +1,48 @@
+package org.apache.hadoop.hive.llap.daemon.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import org.junit.Test;
+
+public class TestQueryIdentifier {
+
+ @Test (timeout = 5000)
+ public void testEquality() {
+
+ String appIdString1 = "app1";
+ String appIdString2 = "app2";
+
+ int dagId1 = 1;
+ int dagId2 = 2;
+
+ QueryIdentifier[] queryIdentifiers = new QueryIdentifier[4];
+
+ queryIdentifiers[0] = new QueryIdentifier(appIdString1, dagId1);
+ queryIdentifiers[1] = new QueryIdentifier(appIdString1, dagId2);
+ queryIdentifiers[2] = new QueryIdentifier(appIdString2, dagId1);
+ queryIdentifiers[3] = new QueryIdentifier(appIdString2, dagId2);
+
+ for (int i = 0 ; i < 4 ; i++) {
+ for (int j = 0 ; j < 4 ; j++) {
+ if (i == j) {
+ assertEquals(queryIdentifiers[i], queryIdentifiers[j]);
+ } else {
+ assertNotEquals(queryIdentifiers[i], queryIdentifiers[j]);
+ }
+ }
+ }
+
+ QueryIdentifier q11 = new QueryIdentifier(appIdString1, dagId1);
+ QueryIdentifier q12 = new QueryIdentifier(appIdString1, dagId2);
+ QueryIdentifier q21 = new QueryIdentifier(appIdString2, dagId1);
+ QueryIdentifier q22 = new QueryIdentifier(appIdString2, dagId2);
+
+ assertEquals(queryIdentifiers[0], q11);
+ assertEquals(queryIdentifiers[1], q12);
+ assertEquals(queryIdentifiers[2], q21);
+ assertEquals(queryIdentifiers[3], q22);
+
+
+ }
+}
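
The test above relies on QueryIdentifier having value semantics over the (application id, dag id) pair. The class below is a plausible shape for such a key, sketched purely for illustration; the actual Hive implementation is not part of this diff and may differ, for example in null handling.

// Hypothetical stand-in for QueryIdentifier, showing the equals/hashCode contract
// that TestQueryIdentifier exercises. Not the real Hive class.
public class QueryIdentifierSketch {
  private final String appIdentifier;
  private final int dagIdentifier;

  public QueryIdentifierSketch(String appIdentifier, int dagIdentifier) {
    this.appIdentifier = appIdentifier;
    this.dagIdentifier = dagIdentifier;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    QueryIdentifierSketch that = (QueryIdentifierSketch) o;
    return dagIdentifier == that.dagIdentifier && appIdentifier.equals(that.appIdentifier);
  }

  @Override
  public int hashCode() {
    return 31 * appIdentifier.hashCode() + dagIdentifier;
  }

  @Override
  public String toString() {
    return "QueryIdentifier{app=" + appIdentifier + ", dagId=" + dagIdentifier + "}";
  }
}
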
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
index 5491064e1c186c486788d77585c9ec80f7d57d77..d1edd124efd1c4ca3dbf2ac0fe1add643caf8228 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestTaskExecutorService.java
@@ -189,7 +189,8 @@ public void testWaitQueuePreemption() throws InterruptedException {
public TaskExecutorServiceForTest(int numExecutors, int waitQueueSize, String waitQueueComparatorClassName,
boolean enablePreemption) {
- super(numExecutors, waitQueueSize, waitQueueComparatorClassName, enablePreemption);
+ super(numExecutors, waitQueueSize, waitQueueComparatorClassName, enablePreemption,
+ Thread.currentThread().getContextClassLoader());
}
private ConcurrentMap completionListeners = new ConcurrentHashMap<>();
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
index ebfb43031381dc587bc5babc7e92ba9c2462202a..73df9851dcc1a5fc4f717079535136961fa97cf1 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
@@ -39,6 +39,7 @@
import org.apache.tez.dag.records.TezTaskAttemptID;
import org.apache.tez.dag.records.TezTaskID;
import org.apache.tez.dag.records.TezVertexID;
+import org.apache.tez.hadoop.shim.DefaultHadoopShim;
import org.apache.tez.runtime.api.impl.ExecutionContextImpl;
import org.apache.tez.runtime.task.EndReason;
import org.apache.tez.runtime.task.TaskRunner2Result;
@@ -58,7 +59,7 @@ public MockRequest(SubmitWorkRequestProto requestProto,
super(requestProto, mock(QueryFragmentInfo.class), conf,
new ExecutionContextImpl("localhost"), null, cred, 0, null, null, null,
mock(KilledTaskHandler.class), mock(
- FragmentCompletionHandler.class));
+ FragmentCompletionHandler.class), new DefaultHadoopShim());
this.workTime = workTime;
this.canFinish = canFinish;
}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ace644bc415b756d9e0bdd32ed9cb1ca40450f5e..7830f17077a5edc1fb4937b30e28bbc13d6208d7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -6406,7 +6406,10 @@ private static void startHouseKeeperService(HiveConf conf) throws Exception {
if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) {
return;
}
- Class c = Class.forName("org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService");
+ startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService"));
+ startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService"));
+ }
+ private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception {
//todo: when metastore adds orderly-shutdown logic, houseKeeper.stop()
//should be called form it
HouseKeeperService houseKeeper = (HouseKeeperService)c.newInstance();
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
index eb4ea93ac9e291b4c5fea0c90515c37581680d0c..539ace00675bd335a26853fa3e7b2ad5d42347ca 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
@@ -36,4 +36,10 @@
* Returns short description of services this module provides.
*/
public String getServiceDescription();
+
+ /**
+ * This is incremented each time the service runs its task. Can be useful to
+ * check if the service is still alive.
+ */
+ public int getIsAliveCounter();
}
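A note on the new getIsAliveCounter() contract: because the counter only advances when the service actually performs its work, a caller can poll it to detect a stuck service. A minimal sketch, assuming a started HouseKeeperService instance named houseKeeper, a known run interval intervalMs, and some logger LOG (all hypothetical, not part of this patch):

  // Hypothetical liveness probe built only on the interface methods declared above.
  int before = houseKeeper.getIsAliveCounter();
  Thread.sleep(2 * intervalMs);  // wait long enough for at least one scheduled run
  if (houseKeeper.getIsAliveCounter() == before) {
    LOG.warn("House keeper '" + houseKeeper.getServiceDescription() + "' has not run; it may be stuck");
  }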
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 0ffdbe0110fa5b69e9edc6c9f84d6c2121981cd6..317913d9f978287fba212f069aa8a5fc392f16d2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -26,6 +26,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.FileMetadataHandler;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
@@ -273,6 +274,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException
Table tblCopy = tbl.deepCopy();
tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(tblCopy.getDbName()));
tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblCopy.getTableName()));
+ normalizeColumnNames(tblCopy);
getHBase().putTable(tblCopy);
commit = true;
} catch (IOException e) {
@@ -283,6 +285,24 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException
}
}
+ private void normalizeColumnNames(Table tbl) {
+ if (tbl.getSd().getCols() != null) {
+ tbl.getSd().setCols(normalizeFieldSchemaList(tbl.getSd().getCols()));
+ }
+ if (tbl.getPartitionKeys() != null) {
+ tbl.setPartitionKeys(normalizeFieldSchemaList(tbl.getPartitionKeys()));
+ }
+ }
+
+ private List<FieldSchema> normalizeFieldSchemaList(List<FieldSchema> fieldschemas) {
+ List<FieldSchema> ret = new ArrayList<>();
+ for (FieldSchema fieldSchema : fieldschemas) {
+ ret.add(new FieldSchema(HiveStringUtils.normalizeIdentifier(fieldSchema.getName()),
+ fieldSchema.getType(), fieldSchema.getComment()));
+ }
+ return ret;
+ }
+
@Override
public boolean dropTable(String dbName, String tableName) throws MetaException,
NoSuchObjectException, InvalidObjectException, InvalidInputException {
@@ -1674,8 +1694,22 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws
boolean commit = false;
openTransaction();
try {
+ //update table properties
+ List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+ List<String> colNames = new ArrayList<>();
+ for (ColumnStatisticsObj statsObj : statsObjs) {
+ colNames.add(statsObj.getColName());
+ }
+ String dbName = colStats.getStatsDesc().getDbName();
+ String tableName = colStats.getStatsDesc().getTableName();
+ Table newTable = getTable(dbName, tableName);
+ Table newTableCopy = newTable.deepCopy();
+ StatsSetupConst.setColumnStatsState(newTableCopy.getParameters(), colNames);
+ getHBase().replaceTable(newTable, newTableCopy);
+
getHBase().updateStatistics(colStats.getStatsDesc().getDbName(),
colStats.getStatsDesc().getTableName(), null, colStats);
+
commit = true;
return true;
} catch (IOException e) {
@@ -1687,17 +1721,32 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws
}
@Override
- public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats,
List<String> partVals) throws
NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
boolean commit = false;
openTransaction();
try {
- getHBase().updateStatistics(statsObj.getStatsDesc().getDbName(),
- statsObj.getStatsDesc().getTableName(), partVals, statsObj);
+ // update partition properties
+ String db_name = colStats.getStatsDesc().getDbName();
+ String tbl_name = colStats.getStatsDesc().getTableName();
+ Partition oldPart = getHBase().getPartition(db_name, tbl_name, partVals);
+ Partition new_partCopy = oldPart.deepCopy();
+ List<String> colNames = new ArrayList<>();
+ List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+ for (ColumnStatisticsObj statsObj : statsObjs) {
+ colNames.add(statsObj.getColName());
+ }
+ StatsSetupConst.setColumnStatsState(new_partCopy.getParameters(), colNames);
+ getHBase().replacePartition(oldPart, new_partCopy,
+ HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys()));
+
+ getHBase().updateStatistics(colStats.getStatsDesc().getDbName(),
+ colStats.getStatsDesc().getTableName(), partVals, colStats);
// We need to invalidate aggregates that include this partition
- getHBase().getStatsCache().invalidate(statsObj.getStatsDesc().getDbName(),
- statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName());
+ getHBase().getStatsCache().invalidate(colStats.getStatsDesc().getDbName(),
+ colStats.getStatsDesc().getTableName(), colStats.getStatsDesc().getPartName());
+
commit = true;
return true;
} catch (IOException e) {
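For context on the createTable() change above: column and partition-key names now go through the same identifier normalization as db/table names. A small illustrative check (not part of the patch), assuming HiveStringUtils.normalizeIdentifier lower-cases identifiers the way it does for the db/table names earlier in this method:

  // Illustrative only: a mixed-case column name is stored in its normalized (lower-case) form.
  FieldSchema fs = new FieldSchema("MyCol", "string", null);
  String stored = HiveStringUtils.normalizeIdentifier(fs.getName());
  assert "mycol".equals(stored);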
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index d3cb7d508fb0aa698ce2836d304e059d67153b51..73255d2c6954616068a3dba7c19ffcb69cc318ae 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -19,6 +19,10 @@
import org.apache.hadoop.hive.metastore.api.CompactionType;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
/**
* Information on a possible or running compaction.
*/
@@ -27,13 +31,18 @@
public String dbname;
public String tableName;
public String partName;
+ char state;
public CompactionType type;
+ String workerId;
+ long start;
public String runAs;
public boolean tooManyAborts = false;
/**
- * {@code null} means it wasn't set (e.g. in case of upgrades)
+ * {@code 0} means it wasn't set (e.g. in case of upgrades, since ResultSet.getLong() will return 0 if the field is NULL)
*/
- public Long highestTxnId;
+ public long highestTxnId;
+ byte[] metaInfo;
+ String hadoopJobId;
private String fullPartitionName = null;
private String fullTableName = null;
@@ -44,6 +53,11 @@ public CompactionInfo(String dbname, String tableName, String partName, Compacti
this.partName = partName;
this.type = type;
}
+ CompactionInfo(long id, String dbname, String tableName, String partName, char state) {
+ this(dbname, tableName, partName, null);
+ this.id = id;
+ this.state = state;
+ }
CompactionInfo() {}
public String getFullPartitionName() {
@@ -82,9 +96,47 @@ public String toString() {
"dbname:" + dbname + "," +
"tableName:" + tableName + "," +
"partName:" + partName + "," +
+ "state:" + state + "," +
"type:" + type + "," +
"runAs:" + runAs + "," +
"tooManyAborts:" + tooManyAborts + "," +
"highestTxnId:" + highestTxnId;
}
+
+ /**
+ * Loads an object from a row of "select * from COMPACTION_QUEUE".
+ * @param rs ResultSet positioned on a row, i.e. after a call to rs.next()
+ * @throws SQLException
+ */
+ static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLException {
+ CompactionInfo fullCi = new CompactionInfo();
+ fullCi.id = rs.getLong(1);
+ fullCi.dbname = rs.getString(2);
+ fullCi.tableName = rs.getString(3);
+ fullCi.partName = rs.getString(4);
+ fullCi.state = rs.getString(5).charAt(0);//cq_state
+ fullCi.type = TxnHandler.dbCompactionType2ThriftType(rs.getString(6).charAt(0));
+ fullCi.workerId = rs.getString(7);
+ fullCi.start = rs.getLong(8);
+ fullCi.runAs = rs.getString(9);
+ fullCi.highestTxnId = rs.getLong(10);
+ fullCi.metaInfo = rs.getBytes(11);
+ fullCi.hadoopJobId = rs.getString(12);
+ return fullCi;
+ }
+ static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionInfo ci, long endTime) throws SQLException {
+ pStmt.setLong(1, ci.id);
+ pStmt.setString(2, ci.dbname);
+ pStmt.setString(3, ci.tableName);
+ pStmt.setString(4, ci.partName);
+ pStmt.setString(5, Character.toString(ci.state));
+ pStmt.setString(6, Character.toString(TxnHandler.thriftCompactionType2DbType(ci.type)));
+ pStmt.setString(7, ci.workerId);
+ pStmt.setLong(8, ci.start);
+ pStmt.setLong(9, endTime);
+ pStmt.setString(10, ci.runAs);
+ pStmt.setLong(11, ci.highestTxnId);
+ pStmt.setBytes(12, ci.metaInfo);
+ pStmt.setString(13, ci.hadoopJobId);
+ }
}
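The two helpers above are deliberately symmetric: loadFullFromCompactionQueue() reads the twelve COMPACTION_QUEUE columns in the order shown, and insertIntoCompletedCompactions() writes them into COMPLETED_COMPACTIONS with the end time spliced in as the ninth parameter. A condensed sketch of the intended move pattern (essentially what markCleaned()/markFailed() in CompactionTxnHandler below do; connection and error handling omitted, and endTimeMillis is an assumed caller-supplied value):

  ResultSet rs = stmt.executeQuery(
      "select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, " +
      "CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID " +
      "from COMPACTION_QUEUE where CQ_ID = " + id);
  if (rs.next()) {
    CompactionInfo ci = CompactionInfo.loadFullFromCompactionQueue(rs);
    PreparedStatement pStmt = dbConn.prepareStatement(
        "insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, " +
        "CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, " +
        "CC_HADOOP_JOB_ID) values(?,?,?,?,?,?,?,?,?,?,?,?,?)");
    ci.state = 's';  // or 'f' on failure, matching the SUCCEEDED_STATE/FAILED_STATE constants
    CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, endTimeMillis);
    pStmt.executeUpdate();
  }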
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 9130322013311589616806c9efbdbeb3e2b1fb64..18b288df313266b40049ca276ecdc27c524546b5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -174,16 +174,7 @@ public CompactionInfo findNextToCompact(String workerId) throws MetaException {
info.dbname = rs.getString(2);
info.tableName = rs.getString(3);
info.partName = rs.getString(4);
- switch (rs.getString(5).charAt(0)) {
- case MAJOR_TYPE:
- info.type = CompactionType.MAJOR;
- break;
- case MINOR_TYPE:
- info.type = CompactionType.MINOR;
- break;
- default:
- throw new MetaException("Unexpected compaction type " + rs.getString(5));
- }
+ info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
// Now, update this record as being worked on by this worker.
long now = getDbTime(dbConn);
s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
@@ -291,8 +282,7 @@ public void markCompacted(CompactionInfo info) throws MetaException {
default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
}
info.runAs = rs.getString(6);
- long highestTxnId = rs.getLong(7);
- info.highestTxnId = rs.wasNull() ? null : highestTxnId;
+ info.highestTxnId = rs.getLong(7);
rc.add(info);
}
LOG.debug("Going to rollback");
@@ -323,13 +313,19 @@ public void markCleaned(CompactionInfo info) throws MetaException {
try {
Connection dbConn = null;
Statement stmt = null;
+ PreparedStatement pStmt = null;
ResultSet rs = null;
try {
- //do we need serializable? Once we have the HWM as above, no. Before that
- //it's debatable, but problem described above applies either way
- //Thus can drop to RC
- dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+ dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
stmt = dbConn.createStatement();
+ rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + info.id);
+ if(rs.next()) {
+ info = CompactionInfo.loadFullFromCompactionQueue(rs);
+ }
+ else {
+ throw new IllegalStateException("No record with CQ_ID=" + info.id + " found in COMPACTION_QUEUE");
+ }
+ close(rs);
String s = "delete from COMPACTION_QUEUE where cq_id = " + info.id;
LOG.debug("Going to execute update <" + s + ">");
int updCount = stmt.executeUpdate(s);
@@ -338,6 +334,10 @@ public void markCleaned(CompactionInfo info) throws MetaException {
LOG.debug("Going to rollback");
dbConn.rollback();
}
+ pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+ info.state = SUCCEEDED_STATE;
+ CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
+ updCount = pStmt.executeUpdate();
// Remove entries from completed_txn_components as well, so we don't start looking there
// again but only up to the highest txn ID included in this compaction job.
@@ -347,7 +347,7 @@ public void markCleaned(CompactionInfo info) throws MetaException {
if (info.partName != null) {
s += " and ctc_partition = '" + info.partName + "'";
}
- if(info.highestTxnId != null) {
+ if(info.highestTxnId != 0) {
s += " and ctc_txnid <= " + info.highestTxnId;
}
LOG.debug("Going to execute update <" + s + ">");
@@ -358,7 +358,7 @@ public void markCleaned(CompactionInfo info) throws MetaException {
s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" +
TXN_ABORTED + "' and tc_database = '" + info.dbname + "' and tc_table = '" +
- info.tableName + "'" + (info.highestTxnId == null ? "" : " and txn_id <= " + info.highestTxnId);
+ info.tableName + "'" + (info.highestTxnId == 0 ? "" : " and txn_id <= " + info.highestTxnId);
if (info.partName != null) s += " and tc_partition = '" + info.partName + "'";
LOG.debug("Going to execute update <" + s + ">");
rs = stmt.executeQuery(s);
@@ -406,6 +406,7 @@ public void markCleaned(CompactionInfo info) throws MetaException {
throw new MetaException("Unable to connect to transaction database " +
StringUtils.stringifyException(e));
} finally {
+ closeStmt(pStmt);
close(rs, stmt, dbConn);
}
} catch (RetryException e) {
@@ -668,6 +669,225 @@ public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) thro
setCompactionHighestTxnId(ci, highestTxnId);
}
}
+ private static class RetentionCounters {
+ int attemptedRetention = 0;
+ int failedRetention = 0;
+ int succeededRetention = 0;
+ RetentionCounters(int attemptedRetention, int failedRetention, int succeededRetention) {
+ this.attemptedRetention = attemptedRetention;
+ this.failedRetention = failedRetention;
+ this.succeededRetention = succeededRetention;
+ }
+ }
+ private void checkForDeletion(List<Long> deleteSet, CompactionInfo ci, RetentionCounters rc) {
+ switch (ci.state) {
+ case ATTEMPTED_STATE:
+ if(--rc.attemptedRetention < 0) {
+ deleteSet.add(ci.id);
+ }
+ break;
+ case FAILED_STATE:
+ if(--rc.failedRetention < 0) {
+ deleteSet.add(ci.id);
+ }
+ break;
+ case SUCCEEDED_STATE:
+ if(--rc.succeededRetention < 0) {
+ deleteSet.add(ci.id);
+ }
+ break;
+ default:
+ //do nothing to handle future RU/D where we may want to add new state types
+ }
+ }
+
+ /**
+ * For any given compactable entity (partition, table if not partitioned) the history of compactions
+ * may look like "sssfffaaasffss", for example. The idea is to retain the tail (most recent) of the
+ * history such that a configurable number of each type of state is present. Any other entries
+ * can be purged. This scheme has the advantage of always retaining the last failure/success even if
+ * it's not recent.
+ * @throws MetaException
+ */
+ public void purgeCompactionHistory() throws MetaException {
+ Connection dbConn = null;
+ Statement stmt = null;
+ ResultSet rs = null;
+ List<Long> deleteSet = new ArrayList<>();
+ RetentionCounters rc = null;
+ try {
+ try {
+ dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+ stmt = dbConn.createStatement();
+ /*cc_id is monotonically increasing, so for any given entity the rows sort in order of compaction history;
 thus this query groups by entity and, within each group, sorts most recent first*/
+ rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " +
+ "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc");
+ String lastCompactedEntity = null;
+ /*In each group, walk from most recent and count occurrences of each state type. Once you
+ * have counted enough (for each state) to satisfy the retention policy, delete all other
+ * instances of this state.*/
+ while(rs.next()) {
+ CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
+ if(!ci.getFullPartitionName().equals(lastCompactedEntity)) {
+ lastCompactedEntity = ci.getFullPartitionName();
+ rc = new RetentionCounters(conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
+ getFailedCompactionRetention(),
+ conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
+ }
+ checkForDeletion(deleteSet, ci, rc);
+ }
+ close(rs);
+
+ String baseDeleteSql = "delete from COMPLETED_COMPACTIONS where cc_id IN(";
+ StringBuilder queryStr = new StringBuilder(baseDeleteSql);
+ for(int i = 0; i < deleteSet.size(); i++) {
+ if(i > 0 && i % TIMED_OUT_TXN_ABORT_BATCH_SIZE == 0) {
+ queryStr.setCharAt(queryStr.length() - 1, ')');
+ stmt.executeUpdate(queryStr.toString());
+ dbConn.commit();
+ queryStr = new StringBuilder(baseDeleteSql);
+ }
+ queryStr.append(deleteSet.get(i)).append(',');
+ }
+ if(queryStr.length() > baseDeleteSql.length()) {
+ queryStr.setCharAt(queryStr.length() - 1, ')');
+ int updCnt = stmt.executeUpdate(queryStr.toString());
+ dbConn.commit();
+ }
+ dbConn.commit();
+ } catch (SQLException e) {
+ rollbackDBConn(dbConn);
+ checkRetryable(dbConn, e, "purgeCompactionHistory()");
+ throw new MetaException("Unable to connect to transaction database " +
+ StringUtils.stringifyException(e));
+ } finally {
+ close(rs, stmt, dbConn);
+ }
+ } catch (RetryException ex) {
+ purgeCompactionHistory();
+ }
+ }
+ /**
+ * This ensures that the number of failed compaction entries retained is greater than the failed
+ * compaction threshold, i.e. the count that prevents new compactions from being scheduled.
+ */
+ public int getFailedCompactionRetention() {
+ int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+ int failedRetention = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
+ if(failedRetention < failedThreshold) {
+ LOG.warn("Invalid configuration " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
+ "=" + failedRetention + " < " + HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" +
+ failedRetention + ". Will use " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
+ "=" + failedRetention);
+ failedRetention = failedThreshold;
+ }
+ return failedRetention;
+ }
+ /**
+ * Returns {@code true} if a sufficient number of consecutive failures already exists for
+ * this table/partition, so that no new automatic compactions will be scheduled.
+ * User-initiated compactions don't do this check.
+ *
+ * Do we allow compacting a whole table (when it's partitioned)? No, though perhaps we should.
+ * That would be a meta operation, i.e. first find all partitions for this table (which have
+ * txn info) and schedule each compaction separately. This avoids complications in this logic.
+ */
+ public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException {
+ Connection dbConn = null;
+ Statement stmt = null;
+ ResultSet rs = null;
+ try {
+ try {
+ dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+ stmt = dbConn.createStatement();
+ rs = stmt.executeQuery("select CC_STATE from COMPLETED_COMPACTIONS where " +
+ "CC_DATABASE = " + quoteString(ci.dbname) + " and " +
+ "CC_TABLE = " + quoteString(ci.tableName) +
+ (ci.partName != null ? " and CC_PARTITION = " + quoteString(ci.partName) : "") +
+ " order by CC_ID desc");
+ int numFailed = 0;
+ int numTotal = 0;
+ int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+ while(rs.next() && ++numTotal <= failedThreshold) {
+ if(rs.getString(1).charAt(0) == FAILED_STATE) {
+ numFailed++;
+ }
+ else {
+ numFailed--;
+ }
+ }
+ return numFailed == failedThreshold;
+ }
+ catch (SQLException e) {
+ LOG.error("Unable to delete from compaction queue " + e.getMessage());
+ LOG.debug("Going to rollback");
+ rollbackDBConn(dbConn);
+ checkRetryable(dbConn, e, "checkFailedCompactions(" + ci + ")");
+ LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e));
+ return false;//weren't able to check
+ } finally {
+ close(rs, stmt, dbConn);
+ }
+ } catch (RetryException e) {
+ return checkFailedCompactions(ci);
+ }
+ }
+ /**
+ * If there is an entry in COMPACTION_QUEUE with ci.id, remove it and
+ * make an entry in COMPLETED_COMPACTIONS with status 'f'.
+ *
+ * But what about markCleaned(), which is called when the table has been deleted...
+ */
+ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this should not throw
+ //todo: this should take "comment" as a parameter to set in CC_META_INFO to provide some context for the failure
+ try {
+ Connection dbConn = null;
+ Statement stmt = null;
+ PreparedStatement pStmt = null;
+ ResultSet rs = null;
+ try {
+ dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+ stmt = dbConn.createStatement();
+ rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
+ if(rs.next()) {
+ ci = CompactionInfo.loadFullFromCompactionQueue(rs);
+ String s = "delete from COMPACTION_QUEUE where cq_id = " + ci.id;
+ LOG.debug("Going to execute update <" + s + ">");
+ int updCnt = stmt.executeUpdate(s);
+ }
+ else {
+ throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
+ }
+ close(rs, stmt, null);
+
+ pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+ ci.state = FAILED_STATE;
+ CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
+ int updCount = pStmt.executeUpdate();
+ LOG.debug("Going to commit");
+ closeStmt(pStmt);
+ dbConn.commit();
+ } catch (SQLException e) {
+ LOG.error("Unable to delete from compaction queue " + e.getMessage());
+ LOG.debug("Going to rollback");
+ rollbackDBConn(dbConn);
+ try {
+ checkRetryable(dbConn, e, "markFailed(" + ci + ")");
+ }
+ catch(MetaException ex) {
+ LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+ }
+ LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e));
+ } finally {
+ close(rs, stmt, null);
+ close(null, pStmt, dbConn);
+ }
+ } catch (RetryException e) {
+ markFailed(ci);
+ }
+ }
+
}
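To make the retention walk in purgeCompactionHistory() concrete, here is a small self-contained illustration of the same counting logic over the "sssfffaaasffss" history from the javadoc, reversed so the most recent entry comes first (matching the cc_id desc ordering of the query). It is purely illustrative, does not touch the metastore, and uses made-up retention limits:

  import java.util.ArrayList;
  import java.util.List;

  public class RetentionWalkExample {
    public static void main(String[] args) {
      // One partition's history, most recent first ('s'=succeeded, 'f'=failed, 'a'=attempted).
      char[] history = "ssffsaaafffsss".toCharArray();
      int keepSucceeded = 2, keepFailed = 2, keepAttempted = 2;
      List<Integer> purge = new ArrayList<>();
      for (int i = 0; i < history.length; i++) {
        switch (history[i]) {
          case 's': if (--keepSucceeded < 0) purge.add(i); break;
          case 'f': if (--keepFailed < 0) purge.add(i); break;
          case 'a': if (--keepAttempted < 0) purge.add(i); break;
          default: break; // unknown future states are never purged, mirroring checkForDeletion()
        }
      }
      // Entries past the retained head of each state would be deleted from COMPLETED_COMPACTIONS.
      System.out.println("positions to purge: " + purge);
    }
  }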
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 201552625f28b6246820181afffda8bdc6b8eff6..2a7545c41f79032f5a10653d5ed0d89309aba198 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -118,10 +118,27 @@ public static void prepDb() throws Exception {
" CQ_WORKER_ID varchar(128)," +
" CQ_START bigint," +
" CQ_RUN_AS varchar(128)," +
- " CQ_HIGHEST_TXN_ID bigint)");
+ " CQ_HIGHEST_TXN_ID bigint," +
+ " CQ_META_INFO varchar(2048) for bit data," +
+ " CQ_HADOOP_JOB_ID varchar(32))");
stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+
+ stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" +
+ " CC_ID bigint PRIMARY KEY," +
+ " CC_DATABASE varchar(128) NOT NULL," +
+ " CC_TABLE varchar(128) NOT NULL," +
+ " CC_PARTITION varchar(767)," +
+ " CC_STATE char(1) NOT NULL," +
+ " CC_TYPE char(1) NOT NULL," +
+ " CC_WORKER_ID varchar(128)," +
+ " CC_START bigint," +
+ " CC_END bigint," +
+ " CC_RUN_AS varchar(128)," +
+ " CC_HIGHEST_TXN_ID bigint," +
+ " CC_META_INFO varchar(2048) for bit data," +
+ " CC_HADOOP_JOB_ID varchar(32))");
conn.commit();
} catch (SQLException e) {
@@ -161,7 +178,7 @@ public static void cleanDb() throws Exception {
dropTable(stmt, "NEXT_LOCK_ID");
dropTable(stmt, "COMPACTION_QUEUE");
dropTable(stmt, "NEXT_COMPACTION_QUEUE_ID");
-
+ dropTable(stmt, "COMPLETED_COMPACTIONS");
conn.commit();
} finally {
closeResources(conn, stmt, null);
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 50d88924a2bfe70b03d9ef0687d7ffa861f7a7b7..c836f80e8f3f7c6eb728deac0b674ab128ba9a69 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -64,14 +64,20 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TxnHandler {
- // Compactor states
+ // Compactor states (should really be an enum)
static final public String INITIATED_RESPONSE = "initiated";
static final public String WORKING_RESPONSE = "working";
static final public String CLEANING_RESPONSE = "ready for cleaning";
+ static final public String FAILED_RESPONSE = "failed";
+ static final public String SUCCEEDED_RESPONSE = "succeeded";
+ static final public String ATTEMPTED_RESPONSE = "attempted";
static final protected char INITIATED_STATE = 'i';
static final protected char WORKING_STATE = 'w';
static final protected char READY_FOR_CLEANING = 'r';
+ static final char FAILED_STATE = 'f';
+ static final char SUCCEEDED_STATE = 's';
+ static final char ATTEMPTED_STATE = 'a';
// Compactor types
static final protected char MAJOR_TYPE = 'a';
@@ -759,7 +765,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst
}
}
- public void compact(CompactionRequest rqst) throws MetaException {
+ public long compact(CompactionRequest rqst) throws MetaException {
// Put a compaction request in the queue.
try {
Connection dbConn = null;
@@ -826,6 +832,7 @@ public void compact(CompactionRequest rqst) throws MetaException {
stmt.executeUpdate(s);
LOG.debug("Going to commit");
dbConn.commit();
+ return id;
} catch (SQLException e) {
LOG.debug("Going to rollback");
rollbackDBConn(dbConn);
@@ -837,7 +844,7 @@ public void compact(CompactionRequest rqst) throws MetaException {
closeDbConn(dbConn);
}
} catch (RetryException e) {
- compact(rqst);
+ return compact(rqst);
}
}
@@ -850,7 +857,13 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
stmt = dbConn.createStatement();
String s = "select cq_database, cq_table, cq_partition, cq_state, cq_type, cq_worker_id, " +
- "cq_start, cq_run_as from COMPACTION_QUEUE";
+ "cq_start, -1 cc_end, cq_run_as, cq_hadoop_job_id, cq_id from COMPACTION_QUEUE union all " +
+ "select cc_database, cc_table, cc_partition, cc_state, cc_type, cc_worker_id, " +
+ "cc_start, cc_end, cc_run_as, cc_hadoop_job_id, cc_id from COMPLETED_COMPACTIONS";
+ //What we want is "order by cc_end desc, cc_start asc" (but Derby has a bug: https://issues.apache.org/jira/browse/DERBY-6013),
+ //i.e. sort so that currently running jobs are at the end of the list (bottom of the screen)
+ //and are themselves sorted by start time.
+ //Without an order by, currently running compactions will likely come first (LHS of the union).
LOG.debug("Going to execute query <" + s + ">");
ResultSet rs = stmt.executeQuery(s);
while (rs.next()) {
@@ -862,16 +875,26 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep
case INITIATED_STATE: e.setState(INITIATED_RESPONSE); break;
case WORKING_STATE: e.setState(WORKING_RESPONSE); break;
case READY_FOR_CLEANING: e.setState(CLEANING_RESPONSE); break;
- default: throw new MetaException("Unexpected compaction state " + rs.getString(4));
+ case FAILED_STATE: e.setState(FAILED_RESPONSE); break;
+ case SUCCEEDED_STATE: e.setState(SUCCEEDED_RESPONSE); break;
+ default:
+ //do nothing to handle RU/D if we add another status
}
switch (rs.getString(5).charAt(0)) {
case MAJOR_TYPE: e.setType(CompactionType.MAJOR); break;
case MINOR_TYPE: e.setType(CompactionType.MINOR); break;
- default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
+ default:
+ //do nothing to handle RU/D if we add another status
}
e.setWorkerid(rs.getString(6));
e.setStart(rs.getLong(7));
- e.setRunAs(rs.getString(8));
+ long endTime = rs.getLong(8);
+ if(endTime != -1) {
+ e.setEndTime(endTime);
+ }
+ e.setRunAs(rs.getString(9));
+ e.setHadoopJobId(rs.getString(10));
+ long id = rs.getLong(11);//for debugging
response.addToCompacts(e);
}
LOG.debug("Going to rollback");
@@ -2374,41 +2397,29 @@ private String addForUpdateClause(Connection dbConn, String selectStatement) thr
throw new MetaException(msg);
}
}
- /**
- * the caller is expected to retry if this fails
- *
- * @return
- * @throws SQLException
- * @throws MetaException
- */
- private long generateNewExtLockId() throws SQLException, MetaException {
- Connection dbConn = null;
- Statement stmt = null;
- ResultSet rs = null;
- try {
- dbConn = getDbConn(getRequiredIsolationLevel());
- stmt = dbConn.createStatement();
-
- // Get the next lock id.
- String s = addForUpdateClause(dbConn, "select nl_next from NEXT_LOCK_ID");
- LOG.debug("Going to execute query <" + s + ">");
- rs = stmt.executeQuery(s);
- if (!rs.next()) {
- LOG.debug("Going to rollback");
- dbConn.rollback();
- throw new MetaException("Transaction tables not properly " +
- "initialized, no record found in next_lock_id");
- }
- long extLockId = rs.getLong(1);
- s = "update NEXT_LOCK_ID set nl_next = " + (extLockId + 1);
- LOG.debug("Going to execute update <" + s + ">");
- stmt.executeUpdate(s);
- LOG.debug("Going to commit.");
- dbConn.commit();
- return extLockId;
+ static String quoteString(String input) {
+ return "'" + input + "'";
+ }
+ static CompactionType dbCompactionType2ThriftType(char dbValue) {
+ switch (dbValue) {
+ case MAJOR_TYPE:
+ return CompactionType.MAJOR;
+ case MINOR_TYPE:
+ return CompactionType.MINOR;
+ default:
+ LOG.warn("Unexpected compaction type " + dbValue);
+ return null;
}
- finally {
- close(rs, stmt, dbConn);
+ }
+ static Character thriftCompactionType2DbType(CompactionType ct) {
+ switch (ct) {
+ case MAJOR:
+ return MAJOR_TYPE;
+ case MINOR:
+ return MINOR_TYPE;
+ default:
+ LOG.warn("Unexpected compaction type " + ct);
+ return null;
}
}
}
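A quick note on the two converters added above: they are package-private and intended to be inverses of each other for known values, warning and returning null instead of throwing, so SHOW COMPACTIONS keeps working if a newer release adds a type this build does not know. A tiny sketch, usable only from within org.apache.hadoop.hive.metastore.txn:

  // Round-trip between the single-char DB encoding and the Thrift enum.
  char major = TxnHandler.thriftCompactionType2DbType(CompactionType.MAJOR);  // 'a' (MAJOR_TYPE)
  assert TxnHandler.dbCompactionType2ThriftType(major) == CompactionType.MAJOR;
  // Unknown codes come back as null rather than an exception, matching the lenient
  // default branches added to showCompact().
  assert TxnHandler.dbCompactionType2ThriftType('x') == null;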
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index 22582d212bae3e016ce0ce0976a0b4ff808e3586..29d5a64154fdb9ab9586fb919fbeae7106b1a874 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -820,6 +820,7 @@ public void booleanTableStatistics() throws Exception {
@Test
public void longTableStatistics() throws Exception {
+ createMockTable(LONG_TYPE);
// Add a long table stats for LONG_COL to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
@@ -856,6 +857,7 @@ public void longTableStatistics() throws Exception {
@Test
public void doubleTableStatistics() throws Exception {
+ createMockTable(DOUBLE_TYPE);
// Add a double table stats for DOUBLE_COL to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
@@ -892,6 +894,7 @@ public void doubleTableStatistics() throws Exception {
@Test
public void stringTableStatistics() throws Exception {
+ createMockTable(STRING_TYPE);
// Add a string table stats for STRING_COL to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
@@ -928,6 +931,7 @@ public void stringTableStatistics() throws Exception {
@Test
public void binaryTableStatistics() throws Exception {
+ createMockTable(BINARY_TYPE);
// Add a binary table stats for BINARY_COL to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
@@ -963,6 +967,7 @@ public void binaryTableStatistics() throws Exception {
@Test
public void decimalTableStatistics() throws Exception {
+ createMockTable(DECIMAL_TYPE);
// Add a decimal table stats for DECIMAL_COL to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
@@ -1278,6 +1283,21 @@ private Table createMockTableAndPartition(String partType, String partVal) throw
store.addPartition(part);
return table;
}
+
+ private Table createMockTable(String type) throws Exception {
+ List<FieldSchema> cols = new ArrayList<FieldSchema>();
+ cols.add(new FieldSchema("col1", type, ""));
+ SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+ Map<String, String> params = new HashMap<String, String>();
+ params.put("key", "value");
+ StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+ int currentTime = (int)(System.currentTimeMillis() / 1000);
+ Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
+ emptyParameters, null, null, null);
+ store.createTable(table);
+ return table;
+ }
/**
* Returns a dummy table level ColumnStatisticsDesc with default values
*/
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java
index 0fe25e64c2e48a8378a9d07ec2fb85d079050517..cfe9cd04468c83f8edd2bff9fcb4dc667ff79298 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java
@@ -327,12 +327,20 @@ public void dropPartition() throws Exception {
// each stat type separately. We'll test them together in the integration tests.
@Test
public void booleanTableStatistics() throws Exception {
- // Because of the way our mock implementation works we actually need to not create the table
- // before we set statistics on it.
long now = System.currentTimeMillis();
String dbname = "default";
String tableName = "statstable";
String boolcol = "boolcol";
+ int startTime = (int)(System.currentTimeMillis() / 1000);
+ List<FieldSchema> cols = new ArrayList<FieldSchema>();
+ cols.add(new FieldSchema(boolcol, "boolean", "nocomment"));
+ SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+ StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+ serde, null, null, emptyParameters);
+ Table table = new Table(tableName, dbname, "me", startTime, startTime, 0, sd, null,
+ emptyParameters, null, null, null);
+ store.createTable(table);
+
long trues = 37;
long falses = 12;
long booleanNulls = 2;
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index 06e0932997d56693cb71919ea1af85c007956389..ff2c2c100f870c6ca5663eb8c000e5167d0eb4b2 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -219,7 +219,8 @@ public void testMarkCleaned() throws Exception {
assertEquals(0, txnHandler.findReadyToClean().size());
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
- assertEquals(0, rsp.getCompactsSize());
+ assertEquals(1, rsp.getCompactsSize());
+ assertTrue(TxnHandler.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
}
@Test
diff --git a/orc/pom.xml b/orc/pom.xml
index fef23e83dd216f50a877dd6de375fffa3a3f5ab2..2d80c97473553dc5ed145a697740734aff204110 100644
--- a/orc/pom.xml
+++ b/orc/pom.xml
@@ -48,6 +48,28 @@
org.apache.hadoop
hadoop-common
${hadoop.version}
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </exclusion>
+      </exclusions>
org.iq80.snappy
diff --git a/orc/src/test/org/apache/orc/impl/TestColumnStatisticsImpl.java b/orc/src/test/org/apache/orc/impl/TestColumnStatisticsImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..61655261719cfab7e53e34d64f33dc55d7991a75
--- /dev/null
+++ b/orc/src/test/org/apache/orc/impl/TestColumnStatisticsImpl.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.orc.impl;
+
+import org.apache.hadoop.hive.serde2.io.DateWritable;
+import org.apache.orc.OrcProto;
+import org.apache.orc.TypeDescription;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestColumnStatisticsImpl {
+
+ @Test
+ public void testUpdateDate() throws Exception {
+ ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(TypeDescription.createDate());
+ DateWritable date = new DateWritable(16400);
+ stat.increment();
+ stat.updateDate(date);
+ assertDateStatistics(stat, 1, 16400, 16400);
+
+ date.set(16410);
+ stat.increment();
+ stat.updateDate(date);
+ assertDateStatistics(stat, 2, 16400, 16410);
+
+ date.set(16420);
+ stat.increment();
+ stat.updateDate(date);
+ assertDateStatistics(stat, 3, 16400, 16420);
+ }
+
+ private void assertDateStatistics(ColumnStatisticsImpl stat, int count, int minimum, int maximum) {
+ OrcProto.ColumnStatistics.Builder builder = stat.serialize();
+
+ assertEquals(count, builder.getNumberOfValues());
+ assertTrue(builder.hasDateStatistics());
+ assertFalse(builder.hasStringStatistics());
+
+ OrcProto.DateStatistics protoStat = builder.getDateStatistics();
+ assertTrue(protoStat.hasMinimum());
+ assertEquals(minimum, protoStat.getMinimum());
+ assertTrue(protoStat.hasMaximum());
+ assertEquals(maximum, protoStat.getMaximum());
+ }
+}
diff --git a/pom.xml b/pom.xml
index 848432c587913965f7b1191e543499df4a417788..2066518aae829a9a41716bf3b3e551dc633cd8f9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -139,6 +139,7 @@
2.4.2
5.5.23
0.3.2
+    <javax-servlet.version>3.0.0.v201112011016</javax-servlet.version>
5.5.1
3.0.1
7.6.0.v20120127
@@ -163,7 +164,7 @@
1.0.1
1.7.5
4.0.4
- 0.8.1-alpha
+ 0.8.2
2.2.0
1.5.0
2.10
diff --git a/ql/pom.xml b/ql/pom.xml
index 5075185825c92d9fc277024e1cafce376a8c3622..358cd2aff5df950b4e46bece99b4bb6f03a3deeb 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -201,15 +201,15 @@
hadoop-common
${hadoop.version}
-
- org.slf4j
- slf4j-log4j12
-
-
- commmons-logging
- commons-logging
-
-
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ commmons-logging
+ commons-logging
+
+
javax.servlet
servlet-api
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
index 6f7ee6a4ac685aef5bdd15513030506281060c47..d153fd688d19d22847e500db062fe209da311fea 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
@@ -235,15 +235,17 @@ public class extends VectorAggregateExpression {
int batchSize,
int[] selection,
boolean[] isNull) {
+
+ if (isNull[0]) {
+ return;
+ }
for (int i=0; i < batchSize; ++i) {
- if (!isNull[selection[i]]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- bufferIndex,
- i);
- myagg.sumValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ bufferIndex,
+ i);
+ myagg.sumValue(value);
}
}
@@ -255,14 +257,16 @@ public class extends VectorAggregateExpression {
int batchSize,
boolean[] isNull) {
+ if (isNull[0]) {
+ return;
+ }
+
for (int i=0; i < batchSize; ++i) {
- if (!isNull[i]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- bufferIndex,
- i);
- myagg.sumValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ bufferIndex,
+ i);
+ myagg.sumValue(value);
}
}
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
index 0595f716555aa38eb02dee92fb3d9307cfcddf84..46d66bddf27a732fcc463884d33df68b2c6dad17 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
@@ -214,15 +214,17 @@ public class extends VectorAggregateExpression {
int batchSize,
int[] selection,
boolean[] isNull) {
+
+ if (isNull[0]) {
+ return;
+ }
for (int i=0; i < batchSize; ++i) {
- if (!isNull[selection[i]]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- aggregrateIndex,
- i);
- myagg.checkValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ aggregrateIndex,
+ i);
+ myagg.checkValue(value);
}
}
@@ -234,14 +236,16 @@ public class extends VectorAggregateExpression {
int batchSize,
boolean[] isNull) {
+ if (isNull[0]) {
+ return;
+ }
+
for (int i=0; i < batchSize; ++i) {
- if (!isNull[i]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- aggregrateIndex,
- i);
- myagg.checkValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ aggregrateIndex,
+ i);
+ myagg.checkValue(value);
}
}
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
index 6912ceda3a80c93c938686f73b9729a67b04f22f..9a48171a8e1c68d7b5289ed5f8cb87ed18de28b1 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
@@ -245,14 +245,16 @@ public class extends VectorAggregateExpression {
int batchSize,
boolean[] isNull) {
+ if (isNull[0]) {
+ return;
+ }
+
for (int i=0; i < batchSize; ++i) {
- if (!isNull[i]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- aggregrateIndex,
- i);
- myagg.checkValue(value, scale);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ aggregrateIndex,
+ i);
+ myagg.checkValue(value, scale);
}
}
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
index 4f707338d2a52091b22d828b802c98605b36b674..cc7e54dfd754cf611bace0c654855071edb84d2a 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
@@ -210,15 +210,17 @@ public class extends VectorAggregateExpression {
int batchSize,
int[] selection,
boolean[] isNull) {
-
+
+ if (isNull[0]) {
+ return;
+ }
+
for (int i=0; i < batchSize; ++i) {
- if (!isNull[selection[i]]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- aggregateIndex,
- i);
- myagg.sumValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ aggregateIndex,
+ i);
+ myagg.sumValue(value);
}
}
@@ -230,14 +232,16 @@ public class extends VectorAggregateExpression {
int batchSize,
boolean[] isNull) {
+ if (isNull[0]) {
+ return;
+ }
+
for (int i=0; i < batchSize; ++i) {
- if (!isNull[i]) {
- Aggregation myagg = getCurrentAggregationBuffer(
- aggregationBufferSets,
- aggregateIndex,
- i);
- myagg.sumValue(value);
- }
+ Aggregation myagg = getCurrentAggregationBuffer(
+ aggregationBufferSets,
+ aggregateIndex,
+ i);
+ myagg.sumValue(value);
}
}
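For readers unfamiliar with the vectorization templates above: these appear to be the repeating-value iteration variants, where one scalar value and one null flag describe the whole batch, so checking isNull[0] once is equivalent to (and cheaper than) the per-row checks that were removed. A rough illustration of the repeating-vector convention this relies on (field names from Hive's ColumnVector classes; the package location may vary by branch):

  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

  // When isRepeating is set, element 0 stands for every row in the batch.
  LongColumnVector col = new LongColumnVector();
  col.isRepeating = true;
  col.noNulls = false;
  col.isNull[0] = true;   // the entire batch is null -> the aggregation can return immediately
  col.vector[0] = 0L;     // the value is irrelevant once isNull[0] is true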
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/CompilationOpContext.java b/ql/src/java/org/apache/hadoop/hive/ql/CompilationOpContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..949f8738f8bf2a707e0b4514ef2aa5ca48ed7dd6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/CompilationOpContext.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A subset of compilation context that is passed to operators to get rid of some globals.
+ * Perhaps this should be rolled into main Context; however, some code necessitates storing the
+ * context in the operators for now, so this may not be advisable given how much stuff the main
+ * Context class contains.
+ * For now, only the operator sequence ID lives here.
+ */
+public class CompilationOpContext {
+ private final AtomicInteger opSeqId = new AtomicInteger(0);
+
+ public int nextOperatorId() {
+ return opSeqId.getAndIncrement();
+ }
+}
\ No newline at end of file
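Since CompilationOpContext replaces the static Operator.resetId() mechanism (see the Driver change below), operator IDs are now scoped to a single compilation. A trivial sketch of the contract:

  // Each query compilation creates its own context (see Context's constructor below),
  // so IDs restart from 0 per query instead of relying on a global static counter.
  CompilationOpContext opCtx = new CompilationOpContext();
  int first = opCtx.nextOperatorId();   // 0
  int second = opCtx.nextOperatorId();  // 1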
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index affaec8df9c1bbd6c81f7093b3f70c7d1943107d..746456bf39d1a5d700b42d3e0f2c0dcd4001e1ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -67,6 +67,7 @@
private int resDirFilesNum;
boolean initialized;
String originalTracker = null;
+ private final CompilationOpContext opContext;
private final Map pathToCS = new ConcurrentHashMap();
// scratch path to use for all non-local (ie. hdfs) file system tmp folders
@@ -133,6 +134,7 @@ public Context(Configuration conf, String executionId) {
localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath();
scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR);
+ opContext = new CompilationOpContext();
}
@@ -715,4 +717,7 @@ public void setCboSucceeded(boolean cboSucceeded) {
this.cboSucceeded = cboSucceeded;
}
+ public CompilationOpContext getOpContext() {
+ return opContext;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 020f037324ee0c99b017329a6aaf40fa77e39a92..75187cf85122024b73248d6e52da9aa04b108ebc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -36,6 +36,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
+import com.google.common.collect.Sets;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.Logger;
@@ -168,7 +169,7 @@ private boolean checkConcurrency() {
@Override
public void init() {
- Operator.resetId();
+ // Nothing for now.
}
/**
@@ -493,7 +494,7 @@ public void run() {
// initialize FetchTask right here
if (plan.getFetchTask() != null) {
- plan.getFetchTask().initialize(conf, plan, null);
+ plan.getFetchTask().initialize(conf, plan, null, ctx.getOpContext());
}
//do the authorization check
@@ -571,7 +572,7 @@ private String getExplainOutput(BaseSemanticAnalyzer sem, QueryPlan plan,
ASTNode astTree) throws IOException {
String ret = null;
ExplainTask task = new ExplainTask();
- task.initialize(conf, plan, null);
+ task.initialize(conf, plan, null, ctx.getOpContext());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
try {
@@ -596,12 +597,27 @@ private String getExplainOutput(BaseSemanticAnalyzer sem, QueryPlan plan,
*/
public static void doAuthorization(BaseSemanticAnalyzer sem, String command)
throws HiveException, AuthorizationException {
- HashSet<ReadEntity> inputs = sem.getInputs();
- HashSet<WriteEntity> outputs = sem.getOutputs();
SessionState ss = SessionState.get();
HiveOperation op = ss.getHiveOperation();
Hive db = sem.getDb();
+ Set<ReadEntity> additionalInputs = new HashSet<ReadEntity>();
+ for (Entity e : sem.getInputs()) {
+ if (e.getType() == Entity.Type.PARTITION) {
+ additionalInputs.add(new ReadEntity(e.getTable()));
+ }
+ }
+
+ Set<WriteEntity> additionalOutputs = new HashSet<WriteEntity>();
+ for (Entity e : sem.getOutputs()) {
+ if (e.getType() == Entity.Type.PARTITION) {
+ additionalOutputs.add(new WriteEntity(e.getTable(), WriteEntity.WriteType.DDL_NO_LOCK));
+ }
+ }
+
+ Set<ReadEntity> inputs = Sets.union(sem.getInputs(), additionalInputs);
+ Set<WriteEntity> outputs = Sets.union(sem.getOutputs(), additionalOutputs);
+
if (ss.isAuthorizationModeV2()) {
// get mapping of tables to columns used
ColumnAccessInfo colAccessInfo = sem.getColumnAccessInfo();
@@ -798,8 +814,8 @@ private static void getTablePartitionUsedColumns(HiveOperation op, BaseSemanticA
}
}
- private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet<ReadEntity> inputs,
- HashSet<WriteEntity> outputs, String command, Map<String, List<String>> tab2cols,
+ private static void doAuthorizationV2(SessionState ss, HiveOperation op, Set<ReadEntity> inputs,
+ Set<WriteEntity> outputs, String command, Map<String, List<String>> tab2cols,
Map> updateTab2Cols) throws HiveException {
/* comment for reviewers -> updateTab2Cols needed to be separate from tab2cols because if I
@@ -819,7 +835,7 @@ private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet
}
private static List getHivePrivObjects(
- HashSet<? extends Entity> privObjects, Map<String, List<String>> tableName2Cols) {
+ Set<? extends Entity> privObjects, Map<String, List<String>> tableName2Cols) {
List hivePrivobjs = new ArrayList();
if(privObjects == null){
return hivePrivobjs;
@@ -1759,7 +1775,7 @@ private TaskRunner launchTask(Task extends Serializable> tsk, String queryId,
cxt.incCurJobNo(1);
console.printInfo("Launching Job " + cxt.getCurJobNo() + " out of " + jobs);
}
- tsk.initialize(conf, plan, cxt);
+ tsk.initialize(conf, plan, cxt, ctx.getOpContext());
TaskResult tskRes = new TaskResult();
TaskRunner tskRun = new TaskRunner(tsk, tskRes);
@@ -1849,7 +1865,7 @@ public void resetFetch() throws IOException {
throw new IOException("Error closing the current fetch task", e);
}
// FetchTask should not depend on the plan.
- fetchTask.initialize(conf, null, null);
+ fetchTask.initialize(conf, null, null, ctx.getOpContext());
} else {
ctx.resetStream();
resStream = null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 6a625927aee2daff39b83c1dbd8113f072f7ba2e..08bc654c03ad00725cac597ac496bb68f56edd77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -436,6 +436,15 @@
HIVE_GROUPING_SETS_AGGR_NOMAPAGGR_MULTIGBY(10315,
"Grouping sets aggregations (with rollups or cubes) are not allowed when " +
"HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"),
+ CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"),
+ CANNOT_DROP_INDEX(10317, "Error while dropping index"),
+ INVALID_AST_TREE(10318, "Internal error : Invalid AST"),
+ ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"),
+ IO_ERROR(10320, "Error while performing IO operation "),
+ ERROR_SERIALIZE_METADATA(10321, "Error while serializing the metadata"),
+ INVALID_LOAD_TABLE_FILE_WORK(10322, "Invalid Load Table Work or Load File Work"),
+ CLASSPATH_ERROR(10323, "Classpath error"),
+ IMPORT_SEMANTIC_ERROR(10324, "Import Semantic Analyzer Error"),
//========================== 20000 range starts here ========================//
SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
@@ -503,7 +512,10 @@
"schema.evolution.columns / schema.evolution.columns.types " +
"nor the " +
"columns / columns.types " +
- "are set. Table schema information is required to read ACID tables")
+ "are set. Table schema information is required to read ACID tables"),
+ ACID_TABLES_MUST_BE_READ_WITH_ACID_READER(30021, "An ORC ACID reader is required to read ACID tables"),
+ ACID_TABLES_MUST_BE_READ_WITH_HIVEINPUTFORMAT(30022, "Must use HiveInputFormat to read ACID tables " +
+ "(set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat)")
;
private int errorCode;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index a3ec0e126b0214f7480743519747e2abf881fcaf..f99bf11c6db58c8594741e6375f87371a4e41454 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
@@ -61,6 +62,15 @@
protected Set incompatFileSet;
protected transient DynamicPartitionCtx dpCtx;
+ /** Kryo ctor. */
+ protected AbstractFileMergeOperator() {
+ super();
+ }
+
+ public AbstractFileMergeOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
public void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
index 73026883c96f27b30e0bcb9b937f6f08cb06beb7..69ba4a290f5d6f17d14750ff043909a43922c815 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
@@ -25,6 +25,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
@@ -50,7 +51,13 @@
transient int numMapRowsRead;
- public AbstractMapJoinOperator() {
+ /** Kryo ctor. */
+ protected AbstractMapJoinOperator() {
+ super();
+ }
+
+ public AbstractMapJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
}
public AbstractMapJoinOperator(AbstractMapJoinOperator extends MapJoinDesc> mjop) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
index 71141770be0e66c179231cbb573443a42522085e..743098b064eda2658b941fba41442a43e64953d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.tez.TezContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc;
@@ -51,6 +52,15 @@
protected transient boolean hasReachedMaxSize = false;
protected transient long MAX_SIZE;
+ /** Kryo ctor. */
+ protected AppMasterEventOperator() {
+ super();
+ }
+
+ public AppMasterEventOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
public void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java
index e2f4f58920c45351d90e8e1506022d5abf1743eb..27ddf13a2137df5425e65caf8cd964af9429195e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java
@@ -24,6 +24,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.CollectDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -42,6 +43,15 @@
protected transient ObjectInspector standardRowInspector;
transient int maxSize;
+ /** Kryo ctor. */
+ protected CollectOperator() {
+ super();
+ }
+
+ public CollectOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
index f6fbe74704a0c09d626c567aca896d21b2dfb721..79144719fbe27d787a6cc77073886eb5e883e170 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -81,9 +82,10 @@ public ColumnStatsTask() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
- work.initializeForFetch();
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, ctx, opContext);
+ work.initializeForFetch(opContext);
try {
JobConf job = new JobConf(conf);
ftOp = new FetchOperator(work.getfWork(), job);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index dcbbe2e6ac8432a9a15281675ff49200f11ced15..a1b98f4e084f98a7b5ea2a7e485b6cc20b628b59 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -68,8 +69,9 @@
.getLogger(ColumnStatsUpdateTask.class);
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, ctx, opContext);
}
private ColumnStatistics constructColumnStatsFromInput()
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index b0170f5d683b16175e2678c407bdf8581903e1cb..f8520f8e3c04c97d3b709eeffc863353080ae60f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -31,6 +31,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.persistence.AbstractRowContainer;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -125,17 +126,23 @@
protected transient int heartbeatInterval;
protected static final int NOTSKIPBIGTABLE = -1;
- public CommonJoinOperator() {
+ /** Kryo ctor. */
+ protected CommonJoinOperator() {
+ super();
+ }
+
+ public CommonJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
}
public CommonJoinOperator(CommonJoinOperator<T> clone) {
+ super(clone.id, clone.cContext);
this.joinEmitInterval = clone.joinEmitInterval;
this.joinCacheSize = clone.joinCacheSize;
this.nextSz = clone.nextSz;
this.childOperators = clone.childOperators;
this.parentOperators = clone.parentOperators;
this.done = clone.done;
- this.operatorId = clone.operatorId;
this.storage = clone.storage;
this.condn = clone.condn;
this.conf = clone.getConf();
@@ -150,7 +157,6 @@ public CommonJoinOperator(CommonJoinOperator clone) {
this.groupKeyObject = clone.groupKeyObject;
this.handleSkewJoin = clone.handleSkewJoin;
this.hconf = clone.hconf;
- this.id = clone.id;
this.inputObjInspectors = clone.inputObjInspectors;
this.noOuterJoin = clone.noOuterJoin;
this.numAliases = clone.numAliases;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 1cbd13d543a5928a2ac60b16a87a76885c8beb3e..8693200f9b164cf50e14db9a2a4b3ede99146415 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -33,6 +33,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
@@ -90,10 +91,15 @@
new ArrayList<Operator<? extends OperatorDesc>>();
transient Set<Integer> fetchInputAtClose;
- public CommonMergeJoinOperator() {
+ /** Kryo ctor. */
+ protected CommonMergeJoinOperator() {
super();
}
+ public CommonMergeJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@SuppressWarnings("unchecked")
@Override
public void initializeOp(Configuration hconf) throws HiveException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java
index 031331e2ffa94447bfb97624365403eed6fac4f1..c96c8135a344049e57167559c4d760b876a42ca5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java
@@ -72,11 +72,6 @@ public boolean hasReduce() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) {
- super.initialize(conf, queryPlan, driverContext);
- }
-
- @Override
public int execute(DriverContext driverContext) {
resTasks = resolver.getTasks(conf, resolverCtx);
resolved = true;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 5f3a9cff488471d50b5e09daeeb84839b4f78830..995d2227a24706b74bd8da84e9bc775d0340ab29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -64,6 +64,7 @@
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -260,8 +261,9 @@ public DDLTask() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, ctx, opContext);
// Pick the formatter to use to display the results. Either the
// normal human readable output or a json object.
@@ -507,7 +509,7 @@ public int execute(DriverContext driverContext) {
AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
if (mergeFilesDesc != null) {
- return mergeFiles(db, mergeFilesDesc);
+ return mergeFiles(db, mergeFilesDesc, driverContext);
}
AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
@@ -609,8 +611,8 @@ private DataOutputStream getOutputStream(Path outputFile) throws HiveException {
* @return
* @throws HiveException
*/
- private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc)
- throws HiveException {
+ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc,
+ DriverContext driverContext) throws HiveException {
ListBucketingCtx lbCtx = mergeFilesDesc.getLbCtx();
boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir();
int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
@@ -642,7 +644,8 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc)
fmd.setListBucketingDepth(lbd);
fmd.setOutputPath(mergeFilesDesc.getOutputDir());
- Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(fmd);
+ CompilationOpContext opContext = driverContext.getCtx().getOpContext();
+ Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(opContext, fmd);
LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork =
new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
@@ -662,7 +665,7 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc)
}
// initialize the task and execute
- task.initialize(db.getConf(), getQueryPlan(), driverCxt);
+ task.initialize(db.getConf(), getQueryPlan(), driverCxt, opContext);
int ret = task.execute(driverCxt);
return ret;
}
@@ -4270,7 +4273,7 @@ private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws H
truncateWork.setMapperCannotSpanPartns(true);
DriverContext driverCxt = new DriverContext();
ColumnTruncateTask taskExec = new ColumnTruncateTask();
- taskExec.initialize(db.getConf(), null, driverCxt);
+ taskExec.initialize(db.getConf(), null, driverCxt, null);
taskExec.setWork(truncateWork);
taskExec.setQueryPlan(this.getQueryPlan());
return taskExec.execute(driverCxt);
@@ -4314,6 +4317,8 @@ private int exchangeTablePartition(Hive db,
for(Partition partition : partitions) {
// Reuse the partition specs from dest partition since they should be the same
+ work.getInputs().add(new ReadEntity(new Partition(sourceTable, partition.getSpec(), null)));
+
work.getOutputs().add(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null),
WriteEntity.WriteType.DELETE));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
index 0888c7b65867dd075bb3a8ac119a8beec9697c8b..b897c16820a5299845b69c72b4afeafbb8b6d056 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
@@ -30,6 +30,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.DemuxDesc;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
@@ -109,6 +110,15 @@
// its children's parents lists, also see childOperatorsTag in Operator) at here.
private int[][] newChildOperatorsTag;
+ /** Kryo ctor. */
+ protected DemuxOperator() {
+ super();
+ }
+
+ public DemuxOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
index 0c1257096ed042d3c5cab86c08e76e15475b9636..06a38846cff69f288f4d92246a10a9fb107544b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
@@ -23,6 +23,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.DummyStoreDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -69,10 +70,15 @@
protected transient InspectableObject result;
- public DummyStoreOperator() {
+ /** Kryo ctor. */
+ protected DummyStoreOperator() {
super();
}
+ public DummyStoreOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
index 16341436f5d3c3b338bfb2de28f6705bb5c73681..0b0c3363977fecea13c63c082674102f3615ba72 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
@@ -26,9 +26,11 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -59,9 +61,10 @@ public FetchTask() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
- work.initializeForFetch();
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, ctx, opContext);
+ work.initializeForFetch(opContext);
try {
// Create a file system handle
@@ -75,6 +78,8 @@ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
job, ts.getNeededColumnIDs(), ts.getNeededColumns());
// push down filters
HiveInputFormat.pushFilters(job, ts);
+
+ AcidUtils.setTransactionalTableScan(job, ts.getConf().isAcidTable());
}
sink = work.getSink();
fetch = new FetchOperator(work, job, source, getVirtualColumns(source));
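
The two added lines in this hunk are the whole ACID hook on the fetch path: after filters are pushed down, the job is flagged so the input format produces an ACID-aware reader. A minimal sketch of that sequence, where jobConf and scanOp are hypothetical stand-ins for the JobConf and TableScanOperator handled in initialize():

// Sketch only: mirrors the order used in FetchTask.initialize() above.
// jobConf and scanOp are illustrative placeholders, not variables introduced by this patch.
HiveInputFormat.pushFilters(jobConf, scanOp);                                   // push down predicates first
AcidUtils.setTransactionalTableScan(jobConf, scanOp.getConf().isAcidTable());   // then mark transactional scans
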
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 32bfcf58c7553f6a1eb32d8fc5b610185ca2e92f..3289cfc0b2df70cf624a0e429d5416bb9158eabf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -322,6 +323,15 @@ private void initializeSpecPath() {
childSpecPathDynLinkedPartitions = conf.getDirName().getName();
}
+ /** Kryo ctor. */
+ protected FileSinkOperator() {
+ super();
+ }
+
+ public FileSinkOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
@@ -900,7 +910,7 @@ protected FSPaths getDynOutPaths(List row, String lbDirName) throws Hive
pathKey = dpDir;
if(conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
String buckNum = row.get(row.size() - 1);
- taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), buckNum);
+ taskId = Utilities.replaceTaskIdFromFilename(taskId, buckNum);
pathKey = appendToSource(taskId, dpDir);
}
FSPaths fsp2 = valToPaths.get(pathKey);
@@ -1145,7 +1155,6 @@ private void publishStats() throws HiveException {
return;
}
- String taskID = Utilities.getTaskIdFromFilename(Utilities.getTaskId(hconf));
String spSpec = conf.getStaticSpec();
for (Map.Entry<String, FSPaths> entry : valToPaths.entrySet()) {
@@ -1155,7 +1164,7 @@ private void publishStats() throws HiveException {
// for bucketed tables, hive.optimize.sort.dynamic.partition optimization
// adds the taskId to the fspKey.
if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
- taskID = Utilities.getTaskIdFromFilename(fspKey);
+ String taskID = Utilities.getTaskIdFromFilename(fspKey);
// if length of (prefix/ds=__HIVE_DEFAULT_PARTITION__/000000_0) is greater than max key prefix
// and if (prefix/ds=10/000000_0) is less than max key prefix, then former will get hashed
// to a smaller prefix (MD5hash/000000_0) and later will stored as such in staging stats table.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
index 0e7e79dc864a63aaf69e37d90dcf41cc33826739..08f26337d755317edc1a54fed9dfe3d26dc6a355 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.io.IOContext;
import org.apache.hadoop.hive.ql.io.IOContextMap;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -45,8 +46,13 @@
private transient IOContext ioContext;
protected transient int heartbeatInterval;
- public FilterOperator() {
+ /** Kryo ctor. */
+ protected FilterOperator() {
super();
+ }
+
+ public FilterOperator(CompilationOpContext ctx) {
+ super(ctx);
consecutiveSearches = 0;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
index 7a4c58ae84a0f68b64288bf33857f50d19d3aa5e..2df7cca8ddee4ac6cf2aa2bf76e2120e73d4bf47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
@@ -23,6 +23,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ForwardDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -61,6 +62,15 @@ static public String getOperatorName() {
return "FOR";
}
+ /** Kryo ctor. */
+ protected ForwardOperator() {
+ super();
+ }
+
+ public ForwardOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
index ec755a876e928a2d97055f60adf23255001c19a3..77f11b9b3d7afd666aa14d7b14d2ad9719d959c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hive.metastore.api.ResourceType;
import org.apache.hadoop.hive.metastore.api.ResourceUri;
import org.apache.hadoop.hive.ql.exec.FunctionInfo.FunctionResource;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -45,6 +46,7 @@
import org.apache.hadoop.hive.ql.plan.FunctionWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.util.ResourceDownloader;
import org.apache.hadoop.util.StringUtils;
/**
@@ -60,8 +62,9 @@ public FunctionTask() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, ctx, opContext);
}
@Override
@@ -249,7 +252,7 @@ private void checkLocalFunctionResources(Hive db, List resources)
for (ResourceUri res : resources) {
String resUri = res.getUri();
- if (!SessionState.canDownloadResource(resUri)) {
+ if (ResourceDownloader.isFileUri(resUri)) {
throw new HiveException("Hive warehouse is non-local, but "
+ res.getUri() + " specifies file on local filesystem. "
+ "Resources on non-local warehouse should specify a non-local scheme/path");
@@ -278,7 +281,7 @@ private void checkLocalFunctionResources(Hive db, List resources)
return converted;
}
- private static SessionState.ResourceType getResourceType(ResourceType rt) throws HiveException {
+ public static SessionState.ResourceType getResourceType(ResourceType rt) {
switch (rt) {
case JAR:
return SessionState.ResourceType.JAR;
@@ -287,7 +290,7 @@ private void checkLocalFunctionResources(Hive db, List resources)
case ARCHIVE:
return SessionState.ResourceType.ARCHIVE;
default:
- throw new HiveException("Unexpected resource type " + rt);
+ throw new AssertionError("Unexpected resource type " + rt);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index 1693ec392b70318e9c304e7f693a9d40888094f0..0839b42bfcaf08862f0aece295f231ba1a4539f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.OpParseContext;
import org.apache.hadoop.hive.ql.plan.AggregationDesc;
@@ -179,6 +180,15 @@ public static FastBitSet groupingSet2BitSet(int value) {
return bits;
}
+ /** Kryo ctor. */
+ protected GroupByOperator() {
+ super();
+ }
+
+ public GroupByOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
index 1de8c7618d58f3c899ed9232b8845402263e061a..47492476b755d8d70763d964c4b495f27dc8084a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
@@ -22,6 +22,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.HashTableDummyDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -32,6 +33,15 @@
public class HashTableDummyOperator extends Operator<HashTableDummyDesc> implements Serializable {
private static final long serialVersionUID = 1L;
+ /** Kryo ctor. */
+ protected HashTableDummyOperator() {
+ super();
+ }
+
+ public HashTableDummyOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index 76308f663af693f3a79c27a324f38ad866f60f74..deb7c7673046297757d132d2f0dacecb1a74e52c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler;
import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
@@ -104,10 +105,17 @@
private long hashTableScale;
private MapJoinMemoryExhaustionHandler memoryExhaustionHandler;
- public HashTableSinkOperator() {
+ /** Kryo ctor. */
+ protected HashTableSinkOperator() {
+ super();
}
- public HashTableSinkOperator(MapJoinOperator mjop) {
+ public HashTableSinkOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
+ public HashTableSinkOperator(CompilationOpContext ctx, MapJoinOperator mjop) {
+ this(ctx);
this.conf = new HashTableSinkDesc(mjop.getConf());
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
index 3453fc9f9c55389d8ceeb5db2aa64deb7c240fd6..08cc4b4c6ec64aaf9dde70223fcb231dda956ae4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.JoinDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -55,6 +56,15 @@
private final transient LongWritable skewjoin_followup_jobs = new LongWritable(0);
+ /** Kryo ctor. */
+ protected JoinOperator() {
+ super();
+ }
+
+ public JoinOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewForwardOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewForwardOperator.java
index e866eed37ad9301a6f71f8b9329cb7aa84ee4f09..4c94ad94559acb0c584982bb2310af6142bca79c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewForwardOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewForwardOperator.java
@@ -22,6 +22,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -55,6 +56,15 @@ public OperatorType getType() {
return OperatorType.LATERALVIEWFORWARD;
}
+ /** Kryo ctor. */
+ protected LateralViewForwardOperator() {
+ super();
+ }
+
+ public LateralViewForwardOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
index 55bb08f740fbff6b070a3d84c4f2cafba185aaa5..7407dc6d59eeb8852ff07d16d5ff260ae43a4148 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
@@ -24,6 +24,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -83,6 +84,15 @@
public static final byte SELECT_TAG = 0;
public static final byte UDTF_TAG = 1;
+ /** Kryo ctor. */
+ protected LateralViewJoinOperator() {
+ super();
+ }
+
+ public LateralViewJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
index fc85bea173e8cd8ae715c18c4711b1f705ebf817..239d56bbcbb42eadb4563f3e6a372c8df31637da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
@@ -23,6 +23,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.LimitDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -39,6 +40,15 @@
protected transient int currCount;
protected transient boolean isMap;
+ /** Kryo ctor. */
+ protected LimitOperator() {
+ super();
+ }
+
+ public LimitOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java
index 919e72fd69dd18c0f130b0bbb08fb9a36e049f34..2f2abc1cbb749de618616f1c036f0f9cb6da440a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java
@@ -24,6 +24,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ListSinkDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -43,6 +44,15 @@
private transient FetchFormatter fetcher;
private transient int numRows;
+ /** Kryo ctor. */
+ protected ListSinkOperator() {
+ super();
+ }
+
+ public ListSinkOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index dc0b85eed11d7f28b55098f60bd92a45326f6562..91b5ca769f88b04c379f38bad3518d36958f1c71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.HashTableLoaderFactory;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap;
@@ -97,7 +98,13 @@
protected HybridHashTableContainer firstSmallTable; // The first small table;
// Only this table has spilled big table rows
- public MapJoinOperator() {
+ /** Kryo ctor. */
+ protected MapJoinOperator() {
+ super();
+ }
+
+ public MapJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
}
public MapJoinOperator(AbstractMapJoinOperator<? extends MapJoinDesc> mjop) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index ec0d95ce7ec66787fa1a82aca7334f14cc8dda8f..2b690f8802fb8d9315f5c37ec87473ee42f41bcc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.MapOperator.MapOpCtx;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -463,6 +464,15 @@ private String getNominalPath(Path fpath) {
return nominal;
}
+ /** Kryo ctor. */
+ protected MapOperator() {
+ super();
+ }
+
+ public MapOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
public void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
index 4f4abd3fe6e1c6ef6ad31b2ad335ff83d74c2d20..d8444fb0d6447b8b411b34370a6f9c6a5ac80ab8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
@@ -28,6 +28,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.MuxDesc;
@@ -170,6 +171,15 @@ public Object process(Object row) throws HiveException {
private transient long[] cntrs;
private transient long[] nextCntrs;
+ /** Kryo ctor. */
+ protected MuxOperator() {
+ super();
+ }
+
+ public MuxOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 85ab6b231d434e2e2d29661d0bf6af04e75632d5..dbe4f80d5bbab91243b0c440d23245d459aad06d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -30,11 +30,11 @@
import java.util.concurrent.CancellationException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -67,6 +67,7 @@
public static final String CONTEXT_NAME_KEY = "__hive.context.name";
private transient Configuration configuration;
+ protected transient CompilationOpContext cContext;
protected List<Operator<? extends OperatorDesc>> childOperators;
protected List<Operator<? extends OperatorDesc>> parentOperators;
protected String operatorId;
@@ -75,8 +76,6 @@
private transient boolean rootInitializeCalled = false;
protected final transient Collection<Future<?>> asyncInitOperations = new HashSet<>();
- private static AtomicInteger seqId;
-
// It can be optimized later so that an operator operator (init/close) is performed
// only after that operation has been performed on all the parents. This will require
// initializing the whole tree in all the mappers (which might be required for mappers
@@ -98,38 +97,24 @@
protected transient State state = State.UNINIT;
- static {
- seqId = new AtomicInteger(0);
- }
-
private boolean useBucketizedHiveInputFormat;
// dummy operator (for not increasing seqId)
- private Operator(String name) {
- id = name;
+ protected Operator(String name, CompilationOpContext cContext) {
+ this();
+ this.cContext = cContext;
+ this.id = name;
initOperatorId();
+ }
+
+ protected Operator() {
childOperators = new ArrayList<Operator<? extends OperatorDesc>>();
parentOperators = new ArrayList<Operator<? extends OperatorDesc>>();
abortOp = new AtomicBoolean(false);
}
- public Operator() {
- this(String.valueOf(seqId.getAndIncrement()));
- }
-
- public static void resetId() {
- seqId.set(0);
- }
-
- /**
- * Create an operator with a reporter.
- *
- * @param reporter
- * Used to report progress of certain operators.
- */
- public Operator(Reporter reporter) {
- this();
- this.reporter = reporter;
+ public Operator(CompilationOpContext cContext) {
+ this(String.valueOf(cContext.nextOperatorId()), cContext);
}
public void setChildOperators(
@@ -228,7 +213,7 @@ public RowSchema getSchema() {
protected transient final boolean isLogTraceEnabled = LOG.isTraceEnabled() && PLOG.isTraceEnabled();
protected transient String alias;
protected transient Reporter reporter;
- protected transient String id;
+ protected String id;
// object inspectors for input rows
// We will increase the size of the array on demand
protected transient ObjectInspector[] inputObjInspectors = new ObjectInspector[1];
@@ -1129,8 +1114,8 @@ public boolean supportSkewJoinOptimization() {
@SuppressWarnings("unchecked")
T descClone = (T)conf.clone();
// also clone the colExprMap by default
- Operator<? extends OperatorDesc> ret =
- OperatorFactory.getAndMakeChild(descClone, getSchema(), getColumnExprMap(), parentClones);
+ Operator<? extends OperatorDesc> ret = OperatorFactory.getAndMakeChild(
+ cContext, descClone, getSchema(), getColumnExprMap(), parentClones);
return ret;
}
@@ -1145,8 +1130,7 @@ public boolean supportSkewJoinOptimization() {
public Operator<? extends OperatorDesc> cloneOp() throws CloneNotSupportedException {
T descClone = (T) conf.clone();
Operator<? extends OperatorDesc> ret =
- OperatorFactory.getAndMakeChild(
- descClone, getSchema());
+ OperatorFactory.getAndMakeChild(cContext, descClone, getSchema());
return ret;
}
@@ -1355,7 +1339,7 @@ public static Operator createDummy() {
@SuppressWarnings({ "serial", "unchecked", "rawtypes" })
private static class DummyOperator extends Operator {
- public DummyOperator() { super("dummy"); }
+ public DummyOperator() { super("dummy", null); }
@Override
public void process(Object row, int tag) {
@@ -1384,4 +1368,13 @@ public boolean getIsReduceSink() {
public String getReduceOutputName() {
return null;
}
+
+ public void setCompilationOpContext(CompilationOpContext ctx) {
+ cContext = ctx;
+ }
+
+ /** @return Compilation operator context. Only available during compilation. */
+ public CompilationOpContext getCompilationOpContext() {
+ return cContext;
+ }
}
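
To make the pattern above easier to follow: every operator subclass touched by this patch gains the same pair of constructors, a protected no-arg one for Kryo deserialization and a public one that takes the per-query CompilationOpContext, which now hands out operator ids in place of the removed static seqId. A minimal sketch (ExampleOperator and ExampleDesc are hypothetical names; the abstract Operator methods are omitted):

// Illustrative only; not a class added by this patch.
public class ExampleOperator extends Operator<ExampleDesc> {
  /** Kryo ctor. */
  protected ExampleOperator() {
    super();        // leaves id/cContext unset; Kryo restores the serialized fields
  }

  public ExampleOperator(CompilationOpContext ctx) {
    super(ctx);     // id = String.valueOf(ctx.nextOperatorId())
  }
}
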
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index f619a5649ce1b76d7677e86326659b675d67d28c..038b96ce4cba3452c29f60e0803b26d03db122e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -19,11 +19,13 @@
package org.apache.hadoop.hive.ql.exec;
import java.util.ArrayList;
+import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.vector.VectorAppMasterEventOperator;
import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator;
import org.apache.hadoop.hive.ql.exec.vector.VectorFilterOperator;
@@ -72,6 +74,8 @@
import org.apache.hadoop.hive.ql.plan.UDTFDesc;
import org.apache.hadoop.hive.ql.plan.UnionDesc;
+import com.google.common.base.Preconditions;
+
/**
* OperatorFactory.
*
@@ -79,97 +83,68 @@
@SuppressWarnings({ "rawtypes", "unchecked" })
public final class OperatorFactory {
protected static transient final Logger LOG = LoggerFactory.getLogger(OperatorFactory.class);
- private static final List opvec;
- private static final List vectorOpvec;
+ private static final IdentityHashMap<Class<? extends OperatorDesc>,
+ Class<? extends Operator<? extends OperatorDesc>>> opvec = new IdentityHashMap<>();
+ private static final IdentityHashMap<Class<? extends OperatorDesc>,
+ Class<? extends Operator<? extends OperatorDesc>>> vectorOpvec = new IdentityHashMap<>();
static {
- opvec = new ArrayList();
- opvec.add(new OpTuple(FilterDesc.class, FilterOperator.class));
- opvec.add(new OpTuple(SelectDesc.class, SelectOperator.class));
- opvec.add(new OpTuple(ForwardDesc.class, ForwardOperator.class));
- opvec.add(new OpTuple(FileSinkDesc.class, FileSinkOperator.class));
- opvec.add(new OpTuple(CollectDesc.class, CollectOperator.class));
- opvec.add(new OpTuple(ScriptDesc.class, ScriptOperator.class));
- opvec.add(new OpTuple(PTFDesc.class, PTFOperator.class));
- opvec.add(new OpTuple(ReduceSinkDesc.class, ReduceSinkOperator.class));
- opvec.add(new OpTuple(GroupByDesc.class, GroupByOperator.class));
- opvec.add(new OpTuple(JoinDesc.class, JoinOperator.class));
- opvec.add(new OpTuple(MapJoinDesc.class, MapJoinOperator.class));
- opvec.add(new OpTuple(SMBJoinDesc.class, SMBMapJoinOperator.class));
- opvec.add(new OpTuple(LimitDesc.class, LimitOperator.class));
- opvec.add(new OpTuple(TableScanDesc.class, TableScanOperator.class));
- opvec.add(new OpTuple(UnionDesc.class, UnionOperator.class));
- opvec.add(new OpTuple(UDTFDesc.class, UDTFOperator.class));
- opvec.add(new OpTuple(LateralViewJoinDesc.class,
- LateralViewJoinOperator.class));
- opvec.add(new OpTuple(LateralViewForwardDesc.class,
- LateralViewForwardOperator.class));
- opvec.add(new OpTuple(HashTableDummyDesc.class,
- HashTableDummyOperator.class));
- opvec.add(new OpTuple(HashTableSinkDesc.class,
- HashTableSinkOperator.class));
- opvec.add(new OpTuple(SparkHashTableSinkDesc.class,
- SparkHashTableSinkOperator.class));
- opvec.add(new OpTuple(DummyStoreDesc.class,
- DummyStoreOperator.class));
- opvec.add(new OpTuple(DemuxDesc.class,
- DemuxOperator.class));
- opvec.add(new OpTuple(MuxDesc.class,
- MuxOperator.class));
- opvec.add(new OpTuple(AppMasterEventDesc.class,
- AppMasterEventOperator.class));
- opvec.add(new OpTuple(DynamicPruningEventDesc.class,
- AppMasterEventOperator.class));
- opvec.add(new OpTuple(SparkPartitionPruningSinkDesc.class,
- SparkPartitionPruningSinkOperator.class));
- opvec.add(new OpTuple(RCFileMergeDesc.class,
- RCFileMergeOperator.class));
- opvec.add(new OpTuple(OrcFileMergeDesc.class,
- OrcFileMergeOperator.class));
- opvec.add(new OpTuple(CommonMergeJoinDesc.class,
- CommonMergeJoinOperator.class));
- opvec.add(new OpTuple(ListSinkDesc.class,
- ListSinkOperator.class));
+ opvec.put(FilterDesc.class, FilterOperator.class);
+ opvec.put(SelectDesc.class, SelectOperator.class);
+ opvec.put(ForwardDesc.class, ForwardOperator.class);
+ opvec.put(FileSinkDesc.class, FileSinkOperator.class);
+ opvec.put(CollectDesc.class, CollectOperator.class);
+ opvec.put(ScriptDesc.class, ScriptOperator.class);
+ opvec.put(PTFDesc.class, PTFOperator.class);
+ opvec.put(ReduceSinkDesc.class, ReduceSinkOperator.class);
+ opvec.put(GroupByDesc.class, GroupByOperator.class);
+ opvec.put(JoinDesc.class, JoinOperator.class);
+ opvec.put(MapJoinDesc.class, MapJoinOperator.class);
+ opvec.put(SMBJoinDesc.class, SMBMapJoinOperator.class);
+ opvec.put(LimitDesc.class, LimitOperator.class);
+ opvec.put(TableScanDesc.class, TableScanOperator.class);
+ opvec.put(UnionDesc.class, UnionOperator.class);
+ opvec.put(UDTFDesc.class, UDTFOperator.class);
+ opvec.put(LateralViewJoinDesc.class, LateralViewJoinOperator.class);
+ opvec.put(LateralViewForwardDesc.class, LateralViewForwardOperator.class);
+ opvec.put(HashTableDummyDesc.class, HashTableDummyOperator.class);
+ opvec.put(HashTableSinkDesc.class, HashTableSinkOperator.class);
+ opvec.put(SparkHashTableSinkDesc.class, SparkHashTableSinkOperator.class);
+ opvec.put(DummyStoreDesc.class, DummyStoreOperator.class);
+ opvec.put(DemuxDesc.class, DemuxOperator.class);
+ opvec.put(MuxDesc.class, MuxOperator.class);
+ opvec.put(AppMasterEventDesc.class, AppMasterEventOperator.class);
+ opvec.put(DynamicPruningEventDesc.class, AppMasterEventOperator.class);
+ opvec.put(SparkPartitionPruningSinkDesc.class, SparkPartitionPruningSinkOperator.class);
+ opvec.put(RCFileMergeDesc.class, RCFileMergeOperator.class);
+ opvec.put(OrcFileMergeDesc.class, OrcFileMergeOperator.class);
+ opvec.put(CommonMergeJoinDesc.class, CommonMergeJoinOperator.class);
+ opvec.put(ListSinkDesc.class, ListSinkOperator.class);
}
static {
- vectorOpvec = new ArrayList();
- vectorOpvec.add(new OpTuple(AppMasterEventDesc.class,
- VectorAppMasterEventOperator.class));
- vectorOpvec.add(new OpTuple(DynamicPruningEventDesc.class,
- VectorAppMasterEventOperator.class));
- vectorOpvec.add(new OpTuple(
- SparkPartitionPruningSinkDesc.class,
- VectorSparkPartitionPruningSinkOperator.class));
- vectorOpvec.add(new OpTuple(SelectDesc.class, VectorSelectOperator.class));
- vectorOpvec.add(new OpTuple(GroupByDesc.class, VectorGroupByOperator.class));
- vectorOpvec.add(new OpTuple(MapJoinDesc.class, VectorMapJoinOperator.class));
- vectorOpvec.add(new OpTuple(SMBJoinDesc.class, VectorSMBMapJoinOperator.class));
- vectorOpvec.add(new OpTuple(ReduceSinkDesc.class,
- VectorReduceSinkOperator.class));
- vectorOpvec.add(new OpTuple(FileSinkDesc.class, VectorFileSinkOperator.class));
- vectorOpvec.add(new OpTuple(FilterDesc.class, VectorFilterOperator.class));
- vectorOpvec.add(new OpTuple(LimitDesc.class, VectorLimitOperator.class));
- vectorOpvec.add(new OpTuple(SparkHashTableSinkDesc.class,
- VectorSparkHashTableSinkOperator.class));
- }
-
- private static final class OpTuple<T extends OperatorDesc> {
- private final Class<T> descClass;
- private final Class<? extends Operator<?>> opClass;
-
- public OpTuple(Class<T> descClass, Class<? extends Operator<?>> opClass) {
- this.descClass = descClass;
- this.opClass = opClass;
- }
+ vectorOpvec.put(AppMasterEventDesc.class, VectorAppMasterEventOperator.class);
+ vectorOpvec.put(DynamicPruningEventDesc.class, VectorAppMasterEventOperator.class);
+ vectorOpvec.put(
+ SparkPartitionPruningSinkDesc.class, VectorSparkPartitionPruningSinkOperator.class);
+ vectorOpvec.put(SelectDesc.class, VectorSelectOperator.class);
+ vectorOpvec.put(GroupByDesc.class, VectorGroupByOperator.class);
+ vectorOpvec.put(MapJoinDesc.class, VectorMapJoinOperator.class);
+ vectorOpvec.put(SMBJoinDesc.class, VectorSMBMapJoinOperator.class);
+ vectorOpvec.put(ReduceSinkDesc.class, VectorReduceSinkOperator.class);
+ vectorOpvec.put(FileSinkDesc.class, VectorFileSinkOperator.class);
+ vectorOpvec.put(FilterDesc.class, VectorFilterOperator.class);
+ vectorOpvec.put(LimitDesc.class, VectorLimitOperator.class);
+ vectorOpvec.put(SparkHashTableSinkDesc.class, VectorSparkHashTableSinkOperator.class);
}
public static <T extends OperatorDesc> Operator<T> getVectorOperator(
- Class<? extends Operator<?>> opClass, T conf, VectorizationContext vContext) throws HiveException {
+ Class<? extends Operator<?>> opClass, CompilationOpContext cContext, T conf,
+ VectorizationContext vContext) throws HiveException {
try {
Operator<T> op = (Operator<T>) opClass.getDeclaredConstructor(
- VectorizationContext.class, OperatorDesc.class).newInstance(
- vContext, conf);
+ CompilationOpContext.class, VectorizationContext.class, OperatorDesc.class)
+ .newInstance(cContext, vContext, conf);
return op;
} catch (Exception e) {
e.printStackTrace();
@@ -177,50 +152,49 @@ public OpTuple(Class descClass, Class extends Operator>> opClass) {
}
}
- public static <T extends OperatorDesc> Operator<T> getVectorOperator(T conf,
- VectorizationContext vContext) throws HiveException {
+ public static <T extends OperatorDesc> Operator<T> getVectorOperator(
+ CompilationOpContext cContext, T conf, VectorizationContext vContext) throws HiveException {
Class<T> descClass = (Class<T>) conf.getClass();
- for (OpTuple o : vectorOpvec) {
- if (o.descClass == descClass) {
- return getVectorOperator(o.opClass, conf, vContext);
- }
+ Class<?> opClass = vectorOpvec.get(descClass);
+ if (opClass != null) {
+ return getVectorOperator(vectorOpvec.get(descClass), cContext, conf, vContext);
}
- throw new HiveException("No vector operator for descriptor class "
- + descClass.getName());
+ throw new HiveException("No vector operator for descriptor class " + descClass.getName());
}
- public static Operator get(Class opClass) {
-
- for (OpTuple o : opvec) {
- if (o.descClass == opClass) {
- try {
- Operator op = (Operator) o.opClass.newInstance();
- return op;
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
+ public static <T extends OperatorDesc> Operator<T> get(
+ CompilationOpContext cContext, Class<T> descClass) {
+ Preconditions.checkNotNull(cContext);
+ Class<?> opClass = opvec.get(descClass);
+ if (opClass != null) {
+ try {
+ return (Operator<T>)opClass.getDeclaredConstructor(
+ CompilationOpContext.class).newInstance(cContext);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
}
}
- throw new RuntimeException("No operator for descriptor class "
- + opClass.getName());
+ throw new RuntimeException("No operator for descriptor class " + descClass.getName());
}
- public static <T extends OperatorDesc> Operator<T> get(Class<T> opClass,
- RowSchema rwsch) {
-
- Operator<T> ret = get(opClass);
- ret.setSchema(rwsch);
- return ret;
+ /**
+ * Returns an operator instantiated for the given descriptor in the given compilation context.
+ */
+ public static <T extends OperatorDesc> Operator<T> get(CompilationOpContext cContext, T conf) {
+ Operator<T> ret = get(cContext, (Class<T>) conf.getClass());
+ ret.setConf(conf);
+ return (ret);
}
/**
* Returns an operator given the conf and a list of children operators.
*/
public static <T extends OperatorDesc> Operator<T> get(T conf,
- Operator<? extends OperatorDesc>... oplist) {
- Operator<T> ret = get((Class<T>) conf.getClass());
+ Operator<? extends OperatorDesc> oplist0, Operator<? extends OperatorDesc>... oplist) {
+ Operator<T> ret = get(oplist0.getCompilationOpContext(), (Class<T>) conf.getClass());
ret.setConf(conf);
+ makeChild(ret, oplist0);
makeChild(ret, oplist);
return (ret);
}
@@ -253,27 +227,28 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of children operators.
*/
- public static <T extends OperatorDesc> Operator<T> get(T conf,
- RowSchema rwsch, Operator... oplist) {
- Operator<T> ret = get(conf, oplist);
+ public static <T extends OperatorDesc> Operator<T> get(
+ CompilationOpContext cContext, T conf, RowSchema rwsch) {
+ Operator<T> ret = get(cContext, conf);
ret.setSchema(rwsch);
return (ret);
}
+
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- Operator... oplist) {
- Operator<T> ret = get((Class<T>) conf.getClass());
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(
+ T conf, Operator oplist0, Operator... oplist) {
+ Operator<T> ret = get(oplist0.getCompilationOpContext(), (Class<T>) conf.getClass());
ret.setConf(conf);
- if (oplist.length == 0) {
- return (ret);
- }
// Add the new operator as child of each of the passed in operators
+ List children = oplist0.getChildOperators();
+ children.add(ret);
+ oplist0.setChildOperators(children);
for (Operator op : oplist) {
- List children = op.getChildOperators();
+ children = op.getChildOperators();
children.add(ret);
op.setChildOperators(children);
}
@@ -281,6 +256,7 @@ public static void makeChild(
// add parents for the newly created operator
List<Operator<? extends OperatorDesc>> parent =
new ArrayList<Operator<? extends OperatorDesc>>();
+ parent.add(oplist0);
for (Operator op : oplist) {
parent.add(op);
}
@@ -293,9 +269,9 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- List<Operator<? extends OperatorDesc>> oplist) {
- Operator<T> ret = get((Class<T>) conf.getClass());
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(CompilationOpContext cContext,
+ T conf, List<Operator<? extends OperatorDesc>> oplist) {
+ Operator<T> ret = get(cContext, (Class<T>) conf.getClass());
ret.setConf(conf);
if (oplist.size() == 0) {
return ret;
@@ -322,9 +298,49 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- RowSchema rwsch, Operator... oplist) {
- Operator<T> ret = getAndMakeChild(conf, oplist);
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(
+ CompilationOpContext cContext, T conf, RowSchema rwsch) {
+ Operator<T> ret = get(cContext, (Class<T>) conf.getClass());
+ ret.setConf(conf);
+ ret.setSchema(rwsch);
+ return ret;
+ }
+
+ /**
+ * Returns an operator given the conf and a list of parent operators.
+ */
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(
+ CompilationOpContext ctx, T conf, RowSchema rwsch, Operator[] oplist) {
+ Operator<T> ret = get(ctx, (Class<T>) conf.getClass());
+ ret.setConf(conf);
+ ret.setSchema(rwsch);
+ if (oplist.length == 0) return ret;
+
+ // Add the new operator as child of each of the passed in operators
+ for (Operator op : oplist) {
+ List children = op.getChildOperators();
+ children.add(ret);
+ op.setChildOperators(children);
+ }
+
+ // add parents for the newly created operator
+ List<Operator<? extends OperatorDesc>> parent =
+ new ArrayList<Operator<? extends OperatorDesc>>();
+ for (Operator op : oplist) {
+ parent.add(op);
+ }
+
+ ret.setParentOperators(parent);
+
+ return (ret);
+ }
+
+ /**
+ * Returns an operator given the conf and a list of parent operators.
+ */
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(
+ T conf, RowSchema rwsch, Operator oplist0, Operator... oplist) {
+ Operator<T> ret = getAndMakeChild(conf, oplist0, oplist);
ret.setSchema(rwsch);
return ret;
}
@@ -332,9 +348,9 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- RowSchema rwsch, Map<String, ExprNodeDesc> colExprMap, Operator... oplist) {
- Operator<T> ret = getAndMakeChild(conf, rwsch, oplist);
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf, RowSchema rwsch,
+ Map<String, ExprNodeDesc> colExprMap, Operator oplist0, Operator... oplist) {
+ Operator<T> ret = getAndMakeChild(conf, rwsch, oplist0, oplist);
ret.setColumnExprMap(colExprMap);
return (ret);
}
@@ -342,9 +358,9 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- RowSchema rwsch, List<Operator<? extends OperatorDesc>> oplist) {
- Operator<T> ret = getAndMakeChild(conf, oplist);
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(CompilationOpContext cContext,
+ T conf, RowSchema rwsch, List<Operator<? extends OperatorDesc>> oplist) {
+ Operator<T> ret = getAndMakeChild(cContext, conf, oplist);
ret.setSchema(rwsch);
return (ret);
}
@@ -352,9 +368,10 @@ public static void makeChild(
/**
* Returns an operator given the conf and a list of parent operators.
*/
- public static <T extends OperatorDesc> Operator<T> getAndMakeChild(T conf,
- RowSchema rwsch, Map<String, ExprNodeDesc> colExprMap, List<Operator<? extends OperatorDesc>> oplist) {
- Operator<T> ret = getAndMakeChild(conf, rwsch, oplist);
+ public static <T extends OperatorDesc> Operator<T> getAndMakeChild(CompilationOpContext cContext,
+ T conf, RowSchema rwsch, Map<String, ExprNodeDesc> colExprMap,
+ List<Operator<? extends OperatorDesc>> oplist) {
+ Operator<T> ret = getAndMakeChild(cContext, conf, rwsch, oplist);
ret.setColumnExprMap(colExprMap);
return (ret);
}
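
For reference, a caller-side sketch of the reworked factory API; only the signatures introduced in this hunk are assumed, while the desc objects and the no-arg CompilationOpContext constructor are illustrative placeholders:

// Sketch: operators are now created against an explicit compilation context.
CompilationOpContext ctx = new CompilationOpContext();      // assumed constructible once per query
Operator<TableScanDesc> scan = OperatorFactory.get(ctx, new TableScanDesc());

// Overloads without a context argument borrow it from the first parent operator.
Operator<FilterDesc> filter = OperatorFactory.getAndMakeChild(new FilterDesc(), scan);
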
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index 2c9deacc90e095fc132130116ce990f484e2e304..445cf3da95a969b27075a9a2e434f500f4119256 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.io.orc.Writer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -56,6 +57,15 @@
private Reader reader;
private FSDataInputStream fdis;
+ /** Kryo ctor. */
+ protected OrcFileMergeOperator() {
+ super();
+ }
+
+ public OrcFileMergeOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
public void process(Object row, int tag) throws HiveException {
Object[] keyValue = (Object[]) row;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
index 113ac2177885ca900a8d972261946832f3ba04d5..2e9e539dfd7615ba4ce892626850abe2d2b78b28 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
@@ -26,6 +26,7 @@
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.PTFPartition.PTFPartitionIterator;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
@@ -60,6 +61,15 @@
transient Configuration hiveConf;
transient PTFInvocation ptfInvocation;
+ /** Kryo ctor. */
+ protected PTFOperator() {
+ super();
+ }
+
+ public PTFOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
/*
* 1. Find out if the operator is invoked at Map-Side or Reduce-side
* 2. Get the deserialized QueryDef
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
index c34454c2a72e125872cb7b5c8ceb79c95f4af035..4dea1d20cf57f41245a8d1f7bd2629e963640299 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
@@ -19,6 +19,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.io.RCFile;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileKeyBufferWrapper;
@@ -36,12 +37,22 @@
*/
public class RCFileMergeOperator
extends AbstractFileMergeOperator<RCFileMergeDesc> {
+
public final static Logger LOG = LoggerFactory.getLogger("RCFileMergeMapper");
RCFile.Writer outWriter;
CompressionCodec codec = null;
int columnNumber = 0;
+ /** Kryo ctor. */
+ protected RCFileMergeOperator() {
+ super();
+ }
+
+ public RCFileMergeOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
public void process(Object row, int tag) throws HiveException {
Object[] keyValue = (Object[]) row;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 4b659523cb0b56928927f1e9353c62e12bc25ebe..74b480279524cabfeaa024a772a5ca0bebca866b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveKey;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -151,6 +152,15 @@
protected transient long logEveryNRows = 0;
private final transient LongWritable recordCounter = new LongWritable();
+ /** Kryo ctor. */
+ protected ReduceSinkOperator() {
+ super();
+ }
+
+ public ReduceSinkOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 62ae630042109c328fe20cd355a31a11ec3ec00e..23abec3b1d05b2ab0997734cad3fd72ce840902a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -34,7 +34,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.BucketMapJoinContext;
@@ -85,7 +87,13 @@
// performed as a smb join, based on all the tables/partitions being joined.
private transient boolean convertedAutomaticallySMBJoin = false;
- public SMBMapJoinOperator() {
+ /** Kryo ctor. */
+ protected SMBMapJoinOperator() {
+ super();
+ }
+
+ public SMBMapJoinOperator(CompilationOpContext ctx) {
+ super(ctx);
}
public SMBMapJoinOperator(AbstractMapJoinOperator<? extends MapJoinDesc> mapJoinOp) {
@@ -196,6 +204,7 @@ public void initializeMapredLocalWork(MapJoinDesc mjConf, Configuration hconf,
// push down filters
HiveInputFormat.pushFilters(jobClone, ts);
+ AcidUtils.setTransactionalTableScan(jobClone, ts.getConf().isAcidTable());
ts.passExecContext(getExecContext());
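The added AcidUtils.setTransactionalTableScan call presumably marks the cloned per-alias JobConf so downstream readers know the scan targets an ACID table. A rough standalone sketch of that idea, assuming it amounts to setting the hive.transactional.table.scan boolean on the clone (the class below is illustrative, not part of the patch):

import org.apache.hadoop.mapred.JobConf;

public class AcidFlagExample {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    JobConf jobClone = new JobConf(job);        // per-alias clone, as in the hunk above
    boolean isAcidTable = true;                 // would come from ts.getConf().isAcidTable()
    jobClone.setBoolean("hive.transactional.table.scan", isAcidTable);
    System.out.println(jobClone.getBoolean("hive.transactional.table.scan", false));
  }
}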
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 63837cec1bf0b5cb2f94f9431f864c0e91da937d..9f311a4b8f9004c25ff57e376890ae47e2999c1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ScriptDesc;
@@ -266,6 +267,15 @@ public File getAbsolutePath(String filename) {
}
}
+ /** Kryo ctor. */
+ protected ScriptOperator() {
+ super();
+ }
+
+ public ScriptOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
index b1b84590d0cbf042bd869f07f3407f8963e948f2..e7c23e86f6d5256cbf3e6e4afd8d7204ce3e191a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -43,6 +44,15 @@
private transient boolean isSelectStarNoCompute = false;
+ /** Kryo ctor. */
+ protected SelectOperator() {
+ super();
+ }
+
+ public SelectOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
index 45f50c39ba5b3d679262ca44f6ea2b7c2d4c114c..0e6e2dee84735d559cf562f9b650fca8354adb79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hive.ql.exec;
+import java.util.LinkedList;
+
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
@@ -33,6 +35,7 @@
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
@@ -356,26 +359,64 @@ public static MapredWork clonePlan(MapredWork plan) {
// TODO: need proper clone. Meanwhile, let's at least keep this horror in one place
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
+ Operator<?> op = plan.getAnyOperator();
+ CompilationOpContext ctx = (op == null) ? null : op.getCompilationOpContext();
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
serializePlan(plan, baos, true);
MapredWork newPlan = deserializePlan(new ByteArrayInputStream(baos.toByteArray()),
MapredWork.class, true);
+ // Restore the context.
+ for (Operator<?> newOp : newPlan.getAllOperators()) {
+ newOp.setCompilationOpContext(ctx);
+ }
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
return newPlan;
}
/**
* Clones using the powers of XML. Do not use unless necessary.
+ * @param roots The roots.
+ * @return The clone.
+ */
+ public static List<Operator<?>> cloneOperatorTree(List<Operator<?>> roots) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
+ CompilationOpContext ctx = roots.isEmpty() ? null : roots.get(0).getCompilationOpContext();
+ serializePlan(roots, baos, true);
+ @SuppressWarnings("unchecked")
+ List<Operator<?>> result =
+ deserializePlan(new ByteArrayInputStream(baos.toByteArray()),
+ roots.getClass(), true);
+ // Restore the context.
+ LinkedList<Operator<?>> newOps = new LinkedList<>(result);
+ while (!newOps.isEmpty()) {
+ Operator<?> newOp = newOps.poll();
+ newOp.setCompilationOpContext(ctx);
+ List<Operator<?>> children = newOp.getChildOperators();
+ if (children != null) {
+ newOps.addAll(children);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Clones using the powers of XML. Do not use unless necessary.
* @param plan The plan.
* @return The clone.
*/
public static BaseWork cloneBaseWork(BaseWork plan) {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
+ Operator<?> op = plan.getAnyRootOperator();
+ CompilationOpContext ctx = (op == null) ? null : op.getCompilationOpContext();
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
serializePlan(plan, baos, true);
BaseWork newPlan = deserializePlan(new ByteArrayInputStream(baos.toByteArray()),
plan.getClass(), true);
+ // Restore the context.
+ for (Operator<?> newOp : newPlan.getAllOperators()) {
+ newOp.setCompilationOpContext(ctx);
+ }
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
return newPlan;
}
@@ -399,16 +440,6 @@ private static void serializeObjectByKryo(Kryo kryo, Object plan, OutputStream o
return t;
}
- public static List<Operator<?>> cloneOperatorTree(List<Operator<?>> roots) {
- ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
- serializePlan(roots, baos, true);
- @SuppressWarnings("unchecked")
- List<Operator<?>> result =
- deserializePlan(new ByteArrayInputStream(baos.toByteArray()),
- roots.getClass(), true);
- return result;
- }
-
/**
* Serializes expression via Kryo.
* @param expr Expression.
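The clonePlan, cloneOperatorTree and cloneBaseWork changes above all follow the same pattern: capture the shared CompilationOpContext from any operator before serializing, then reattach it to every operator of the deserialized copy, because the context itself does not survive serialization. A self-contained analogue using plain Java serialization (SharedContext, Node and cloneTree are illustrative names, not Hive code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

class SharedContext {}                       // deliberately not serialized with the tree

class Node implements Serializable {
  private static final long serialVersionUID = 1L;
  transient SharedContext ctx;               // lost on serialization, like CompilationOpContext
  final List<Node> children = new ArrayList<>();
}

public class CloneAndRestore {
  static List<Node> cloneTree(List<Node> roots) throws Exception {
    // Capture the shared context from any root before cloning.
    SharedContext ctx = roots.isEmpty() ? null : roots.get(0).ctx;
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
    try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
      oos.writeObject(new ArrayList<>(roots));
    }
    List<Node> result;
    try (ObjectInputStream ois =
        new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
      @SuppressWarnings("unchecked")
      List<Node> read = (List<Node>) ois.readObject();
      result = read;
    }
    // Walk the cloned tree and reattach the context to every node.
    Deque<Node> queue = new ArrayDeque<>(result);
    while (!queue.isEmpty()) {
      Node n = queue.poll();
      n.ctx = ctx;
      queue.addAll(n.children);
    }
    return result;
  }

  public static void main(String[] args) throws Exception {
    Node root = new Node();
    root.ctx = new SharedContext();
    root.children.add(new Node());
    List<Node> clones = cloneTree(new ArrayList<>(List.of(root)));
    System.out.println(clones.get(0).children.get(0).ctx != null);  // true: context restored
  }
}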
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
index 7a8de2d3909ccccb79b94b942a5781bd7adbc7ba..85344fc6a1f78109df78c3a700f72056ce165049 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinPersistableTableContainer;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
import org.apache.hadoop.hive.ql.log.PerfLogger;
@@ -54,8 +55,15 @@
private final HashTableSinkOperator htsOperator;
- public SparkHashTableSinkOperator() {
- htsOperator = new HashTableSinkOperator();
+ /** Kryo ctor. */
+ protected SparkHashTableSinkOperator() {
+ super();
+ htsOperator = null; // Kryo will set this; or so we hope.
+ }
+
+ public SparkHashTableSinkOperator(CompilationOpContext ctx) {
+ super(ctx);
+ htsOperator = new HashTableSinkOperator(ctx);
}
@Override
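Most operators in this patch gain the same pair of constructors: a protected no-arg constructor reserved for Kryo and a public one taking CompilationOpContext. A standalone sketch of why the no-arg constructor suffices for deserialization (assuming the Kryo 3/4 API; Widget and Context are made-up classes): Kryo instantiates through the no-arg constructor and then restores the fields from the stream, so anything the public constructor would normally compute, such as htsOperator above, must either be serialized or rebuilt afterwards.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import java.io.ByteArrayOutputStream;

class Context { int id = 42; }

class Widget {
  Context ctx;
  String name;

  /** Kryo ctor: only the serializer should use this. */
  protected Widget() {}

  Widget(Context ctx, String name) {
    this.ctx = ctx;
    this.name = name;
  }
}

public class KryoCtorExample {
  public static void main(String[] args) {
    Kryo kryo = new Kryo();
    kryo.setRegistrationRequired(false);   // accept unregistered classes for this sketch

    Widget original = new Widget(new Context(), "w1");
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Output out = new Output(baos);
    kryo.writeObject(out, original);
    out.close();

    // Kryo goes through the no-arg ctor, then populates ctx and name from the stream.
    Widget copy = kryo.readObject(new Input(baos.toByteArray()), Widget.class);
    System.out.println(copy.name + " / " + copy.ctx.id);
  }
}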
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
index bb4bde98d6221746c0a3b8bde085ca110b431b0b..14eacdf3540fd7b0bce262ffd7901d95729974f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
@@ -81,8 +82,9 @@ public StatsNoJobTask() {
}
@Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) {
- super.initialize(conf, queryPlan, driverContext);
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext,
+ CompilationOpContext opContext) {
+ super.initialize(conf, queryPlan, driverContext, opContext);
jc = new JobConf(conf);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 32806dc042eeecf1b59ad2838d64a8377af7593c..5253521557d09f12163624606373d64cde005d70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -209,6 +210,15 @@ private void gatherStats(Object row) {
}
+ /** Kryo ctor. */
+ protected TableScanOperator() {
+ super();
+ }
+
+ public TableScanOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index 0eab63ef7fc7e0f291fdcf2c1e001860ea38f633..40c89cb9681567a18181514cbffc497e0b2f937e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -30,6 +30,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.lib.Node;
@@ -120,7 +121,8 @@ public TaskHandle getTaskHandle() {
return taskHandle;
}
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) {
+ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext,
+ CompilationOpContext opContext) {
this.queryPlan = queryPlan;
isdone = false;
started = false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TemporaryHashSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TemporaryHashSinkOperator.java
index 9e3a84f81de852a37f70f7105673e888b2e521c0..bc311ee79743cbe32dce91df0406985dd2b8718b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TemporaryHashSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TemporaryHashSinkOperator.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.HashTableSinkDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
@@ -27,7 +28,8 @@
import java.io.IOException;
public class TemporaryHashSinkOperator extends HashTableSinkOperator {
- public TemporaryHashSinkOperator(MapJoinDesc desc) {
+ public TemporaryHashSinkOperator(CompilationOpContext ctx, MapJoinDesc desc) {
+ super(ctx);
conf = new HashTableSinkDesc(desc);
// Sanity check the config.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java
index 069eab255e57e816735543696ccd2dff714f3431..04d6c9f757ee4f7d27237c4ed5d4163fac79357a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java
@@ -20,6 +20,7 @@
import java.io.Serializable;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
/**
@@ -29,4 +30,12 @@
Operator<T> implements Serializable {
private static final long serialVersionUID = 1L;
+ /** Kryo ctor. */
+ protected TerminalOperator() {
+ super();
+ }
+
+ public TerminalOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
index e9f65be7fb196cbea62342b6a7a80db78ffce7b6..f28f928347d34ae2780c8da80c2a2560968202a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.exec;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
@@ -27,6 +28,15 @@
*
*/
public class TezDummyStoreOperator extends DummyStoreOperator {
+ /** Kryo ctor. */
+ protected TezDummyStoreOperator() {
+ super();
+ }
+
+ public TezDummyStoreOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
private boolean fetchDone = false;
/**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
index b3c6d916cb91d688400a8042cd0c3690fe742f74..1dae96303de4465010ffa6504e75b714a0cb3fa0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
@@ -29,6 +29,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.UDTFDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -58,6 +59,15 @@
*/
transient AutoProgressor autoProgressor;
+ /** Kryo ctor. */
+ protected UDTFOperator() {
+ super();
+ }
+
+ public UDTFOperator(CompilationOpContext ctx) {
+ super(ctx);
+ }
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
index ddb23ee745d4bddc56e3afd8c453739eadbd299f..3a673e6c1a95c0959ae9f6123c293504c1b33d6c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
@@ -20,11 +20,10 @@
import java.io.Serializable;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.List;
-import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.UnionDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -48,6 +47,15 @@
ArrayList