diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml
index 756b997cff..58eb96d338 100644
--- a/checkstyle/checkstyle.xml
+++ b/checkstyle/checkstyle.xml
@@ -53,9 +53,6 @@
-
-
-
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index a120b4573d..ca15382b7f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -478,6 +478,9 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
REPL_DUMP_METADATA_ONLY("hive.repl.dump.metadata.only", false,
"Indicates whether replication dump only metadata information or data + metadata. \n"
+ "This config makes hive.repl.include.external.tables config ineffective."),
+ REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE("hive.repl.dump.metadata.only.for.external.table",
+ false,
+ "Indicates whether external table replication dump only metadata information or data + metadata"),
REPL_BOOTSTRAP_ACID_TABLES("hive.repl.bootstrap.acid.tables", false,
"Indicates if repl dump should bootstrap the information about ACID tables along with \n"
+ "incremental dump for replication. It is recommended to keep this config parameter \n"
@@ -601,6 +604,10 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
"Maximum number of dynamic partitions allowed to be created in total."),
DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
"Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
+ DYNAMICPARTITIONCONVERT("hive.exec.dynamic.partition.type.conversion", true,
+ "Whether to check and cast a dynamic partition column before creating the partition " +
+ "directory. For example, if partition p is type int and we insert string '001', then if " +
+ "this value is true, directory p=1 will be created; if false, p=001"),
MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
"Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java
deleted file mode 100644
index 1ddb66e44b..0000000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Integration tests for the HiveMetaTool program. */
-package org.apache.hadoop.hive.metastore.tools.metatool;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetadataReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetadataReplicationScenariosExternalTables.java
new file mode 100644
index 0000000000..f2d61ccf44
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetadataReplicationScenariosExternalTables.java
@@ -0,0 +1,647 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
+import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.FILE_NAME;
+import static org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * TestMetadataReplicationScenariosExternalTables - Tests metadata-only replication
+ * for external tables.
+ */
+public class TestMetadataReplicationScenariosExternalTables extends BaseReplicationAcrossInstances {
+
+ private static final String REPLICA_EXTERNAL_BASE = "/replica_external_base";
+ String extraPrimaryDb;
+
+ @BeforeClass
+ public static void classLevelSetup() throws Exception {
+ Map<String, String> overrides = new HashMap<>();
+ overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+ GzipJSONMessageEncoder.class.getCanonicalName());
+ overrides.put(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY.varname, "false");
+ overrides.put(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname, "true");
+ overrides.put(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE.varname, "true");
+ overrides.put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
+ UserGroupInformation.getCurrentUser().getUserName());
+
+ internalBeforeClassSetup(overrides, TestReplicationScenarios.class);
+ }
+
+ @Before
+ public void setup() throws Throwable {
+ super.setup();
+ extraPrimaryDb = "extra_" + primaryDbName;
+ }
+
+ @After
+ public void tearDown() throws Throwable {
+ primary.run("drop database if exists " + extraPrimaryDb + " cascade");
+ super.tearDown();
+ }
+
+ @Test
+ public void replicationWithoutExternalTables() throws Throwable {
+ List<String> loadWithClause = externalTableBasePathWithClause();
+ List<String> dumpWithClause
+ = Arrays.asList("'" + HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname + "'='false'");
+
+
+
+ WarehouseInstance.Tuple tuple = primary
+ .run("use " + primaryDbName)
+ .run("create external table t1 (id int)")
+ .run("insert into table t1 values (1)")
+ .run("insert into table t1 values (2)")
+ .run("create external table t2 (place string) partitioned by (country string)")
+ .run("insert into table t2 partition(country='india') values ('bangalore')")
+ .run("insert into table t2 partition(country='us') values ('austin')")
+ .run("insert into table t2 partition(country='france') values ('paris')")
+ .dump(primaryDbName, null, dumpWithClause);
+
+ // the _external_tables_file info should only be created if external tables are to be replicated, not otherwise
+ assertFalse(primary.miniDFSCluster.getFileSystem()
+ .exists(new Path(new Path(tuple.dumpLocation, primaryDbName.toLowerCase()), FILE_NAME)));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("repl status " + replicatedDbName)
+ .verifyResult(tuple.lastReplicationId)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyFailure(new String[] {"t1"})
+ .run("show tables like 't2'")
+ .verifyFailure(new String[] {"t2"})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("create external table t3 (id int)")
+ .run("insert into table t3 values (10)")
+ .run("insert into table t3 values (20)")
+ .dump(primaryDbName, tuple.lastReplicationId, dumpWithClause);
+
+ // the _external_tables_file info should only be created if external tables are to be replicated, not otherwise
+ assertFalse(primary.miniDFSCluster.getFileSystem()
+ .exists(new Path(tuple.dumpLocation, FILE_NAME)));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't3'")
+ .verifyFailure(new String[] {"t3"})
+ .verifyReplTargetProperty(replicatedDbName);
+ }
+
+ @Test
+ public void externalTableReplicationWithDefaultPaths() throws Throwable {
+ //creates external tables with partitions
+ WarehouseInstance.Tuple tuple = primary
+ .run("use " + primaryDbName)
+ .run("create external table t1 (id int)")
+ .run("insert into table t1 values (1)")
+ .run("insert into table t1 values (2)")
+ .run("create external table t2 (place string) partitioned by (country string)")
+ .run("insert into table t2 partition(country='india') values ('bangalore')")
+ .run("insert into table t2 partition(country='us') values ('austin')")
+ .run("insert into table t2 partition(country='france') values ('paris')")
+ .dump("repl dump " + primaryDbName);
+
+ // verify that the external table info is not written, as this is metadata-only replication
+ assertFalseExternalFileInfo(new Path(new Path(tuple.dumpLocation, primaryDbName.toLowerCase()), FILE_NAME));
+
+ List<String> withClauseOptions = externalTableBasePathWithClause();
+
+ replica.load(replicatedDbName, tuple.dumpLocation, withClauseOptions)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyResult("t1")
+ .run("show tables like 't2'")
+ .verifyResult("t2")
+ .run("repl status " + replicatedDbName)
+ .verifyResult(tuple.lastReplicationId)
+ .run("select country from t2 where country = 'us'")
+ .verifyResult(null)
+ .run("select country from t2 where country = 'france'")
+ .verifyResult(null);
+
+ // Ckpt should be set on bootstrapped db.
+ replica.verifyIfCkptSet(replicatedDbName, tuple.dumpLocation);
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("create external table t3 (id int)")
+ .run("insert into table t3 values (10)")
+ .run("create external table t4 as select id from t3")
+ .dump("repl dump " + primaryDbName + " from " + tuple.lastReplicationId);
+
+ // verify that the external table info is not written for incremental, as this is metadata-only replication
+ assertFalseExternalFileInfo(new Path(tuple.dumpLocation, FILE_NAME));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, withClauseOptions)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't3'")
+ .verifyResult("t3")
+ .run("select id from t3")
+ .verifyResult(null)
+ .run("select id from t4")
+ .verifyResult(null);
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("drop table t1")
+ .dump("repl dump " + primaryDbName + " from " + tuple.lastReplicationId);
+
+ // verify that the external table info is not written for incremental, as this is metadata-only replication
+ assertFalseExternalFileInfo(new Path(tuple.dumpLocation, FILE_NAME));
+ }
+
+ @Test
+ public void externalTableReplicationWithCustomPaths() throws Throwable {
+ Path externalTableLocation =
+ new Path("/" + testName.getMethodName() + "/" + primaryDbName + "/" + "a/");
+ DistributedFileSystem fs = primary.miniDFSCluster.getFileSystem();
+ fs.mkdirs(externalTableLocation, new FsPermission("777"));
+
+ // Create base directory but use HDFS path without schema or authority details.
+ // Hive should pick up the local cluster's HDFS schema/authority.
+ externalTableBasePathWithClause();
+ List<String> loadWithClause = Arrays.asList(
+ "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ + REPLICA_EXTERNAL_BASE + "'",
+ "'distcp.options.update'=''"
+ );
+
+ WarehouseInstance.Tuple bootstrapTuple = primary.run("use " + primaryDbName)
+ .run("create external table a (i int, j int) "
+ + "row format delimited fields terminated by ',' "
+ + "location '" + externalTableLocation.toUri() + "'")
+ .dump(primaryDbName, null);
+
+ replica.load(replicatedDbName, bootstrapTuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("show tables like 'a'")
+ .verifyResults(Collections.singletonList("a"))
+ .run("select * From a").verifyResults(Collections.emptyList());
+
+ //externally add data to location
+ try (FSDataOutputStream outputStream =
+ fs.create(new Path(externalTableLocation, "file1.txt"))) {
+ outputStream.write("1,2\n".getBytes());
+ outputStream.write("13,21\n".getBytes());
+ }
+
+ WarehouseInstance.Tuple incrementalTuple = primary.run("create table b (i int)")
+ .dump(primaryDbName, bootstrapTuple.lastReplicationId);
+
+ replica.load(replicatedDbName, incrementalTuple.dumpLocation, loadWithClause)
+ .run("select i From a")
+ .verifyResults(new String[] {})
+ .run("select j from a")
+ .verifyResults(new String[] {});
+
+ // alter table location to something new.
+ externalTableLocation =
+ new Path("/" + testName.getMethodName() + "/" + primaryDbName + "/new_location/a/");
+ incrementalTuple = primary.run("use " + primaryDbName)
+ .run("alter table a set location '" + externalTableLocation + "'")
+ .dump(primaryDbName, incrementalTuple.lastReplicationId);
+
+ replica.load(replicatedDbName, incrementalTuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("select i From a")
+ .verifyResults(Collections.emptyList());
+ }
+
+ @Test
+ public void externalTableWithPartitions() throws Throwable {
+ Path externalTableLocation =
+ new Path("/" + testName.getMethodName() + "/t2/");
+ DistributedFileSystem fs = primary.miniDFSCluster.getFileSystem();
+ fs.mkdirs(externalTableLocation, new FsPermission("777"));
+
+ List<String> loadWithClause = externalTableBasePathWithClause();
+
+ WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName)
+ .run("create external table t2 (place string) partitioned by (country string) row format "
+ + "delimited fields terminated by ',' location '" + externalTableLocation.toString()
+ + "'")
+ .run("insert into t2 partition(country='india') values ('bangalore')")
+ .dump("repl dump " + primaryDbName);
+
+ assertFalseExternalFileInfo(new Path(new Path(tuple.dumpLocation, primaryDbName.toLowerCase()), FILE_NAME));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't2'")
+ .verifyResults(new String[] {"t2"})
+ .run("select place from t2")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ // add new data externally, to a partition, but under the table level top directory
+ Path partitionDir = new Path(externalTableLocation, "country=india");
+ try (FSDataOutputStream outputStream = fs.create(new Path(partitionDir, "file.txt"))) {
+ outputStream.write("pune\n".getBytes());
+ outputStream.write("mumbai\n".getBytes());
+ }
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("insert into t2 partition(country='australia') values ('sydney')")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ assertFalseExternalFileInfo(new Path(tuple.dumpLocation, FILE_NAME));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("select distinct(country) from t2")
+ .verifyResults(new String[] {})
+ .run("select place from t2 where country='india'")
+ .verifyResults(new String[] {})
+ .run("select place from t2 where country='australia'")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ Path customPartitionLocation =
+ new Path("/" + testName.getMethodName() + "/partition_data/t2/country=france");
+ fs.mkdirs(externalTableLocation, new FsPermission("777"));
+
+ // add new partitions to the table, at an external location than the table level directory
+ try (FSDataOutputStream outputStream = fs
+ .create(new Path(customPartitionLocation, "file.txt"))) {
+ outputStream.write("paris".getBytes());
+ }
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("ALTER TABLE t2 ADD PARTITION (country='france') LOCATION '" + customPartitionLocation
+ .toString() + "'")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("select place from t2 where country='france'")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ // change the location of the partition via alter command
+ String tmpLocation = "/tmp/" + System.nanoTime();
+ primary.miniDFSCluster.getFileSystem().mkdirs(new Path(tmpLocation), new FsPermission("777"));
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("alter table t2 partition (country='france') set location '" + tmpLocation + "'")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("select place from t2 where country='france'")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ // Changing location of the external table, should result in changes to the location of
+ // partition residing within the table location and not the partitions located outside.
+ String tmpLocation2 = "/tmp/" + System.nanoTime() + "_2";
+ primary.miniDFSCluster.getFileSystem().mkdirs(new Path(tmpLocation2), new FsPermission("777"));
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("insert into table t2 partition(country='france') values ('lyon')")
+ .run("alter table t2 set location '" + tmpLocation2 + "'")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause);
+ }
+
+ @Test
+ public void externalTableIncrementalReplication() throws Throwable {
+ WarehouseInstance.Tuple tuple = primary.dump("repl dump " + primaryDbName);
+ replica.load(replicatedDbName, tuple.dumpLocation);
+
+ Path externalTableLocation =
+ new Path("/" + testName.getMethodName() + "/t1/");
+ DistributedFileSystem fs = primary.miniDFSCluster.getFileSystem();
+ fs.mkdirs(externalTableLocation, new FsPermission("777"));
+
+ tuple = primary.run("use " + primaryDbName)
+ .run("create external table t1 (place string) partitioned by (country string) row format "
+ + "delimited fields terminated by ',' location '" + externalTableLocation.toString()
+ + "'")
+ .run("alter table t1 add partition(country='india')")
+ .run("alter table t1 add partition(country='us')")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ assertFalseExternalFileInfo(new Path(tuple.dumpLocation, FILE_NAME));
+
+ // Add new data externally, to a partition, but under the partition level top directory
+ // Also, it is added after dumping the events but data should be seen at target after REPL LOAD.
+ Path partitionDir = new Path(externalTableLocation, "country=india");
+ try (FSDataOutputStream outputStream = fs.create(new Path(partitionDir, "file.txt"))) {
+ outputStream.write("pune\n".getBytes());
+ outputStream.write("mumbai\n".getBytes());
+ }
+
+ try (FSDataOutputStream outputStream = fs.create(new Path(partitionDir, "file1.txt"))) {
+ outputStream.write("bangalore\n".getBytes());
+ }
+
+ List<String> loadWithClause = externalTableBasePathWithClause();
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyResult("t1")
+ .run("show partitions t1")
+ .verifyResults(new String[] {"country=india", "country=us"})
+ .run("select place from t1 order by place")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ // Delete one of the file and update another one.
+ fs.delete(new Path(partitionDir, "file.txt"), true);
+ fs.delete(new Path(partitionDir, "file1.txt"), true);
+ try (FSDataOutputStream outputStream = fs.create(new Path(partitionDir, "file1.txt"))) {
+ outputStream.write("chennai\n".getBytes());
+ }
+
+ // Repl dump with zero events; the external tables location info file should not be present.
+ tuple = primary.dump(primaryDbName, tuple.lastReplicationId);
+ assertFalseExternalFileInfo(new Path(tuple.dumpLocation, FILE_NAME));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyResult("t1")
+ .run("show partitions t1")
+ .verifyResults(new String[] {"country=india", "country=us"})
+ .run("select place from t1 order by place")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ Hive hive = Hive.get(replica.getConf());
+ Set<Partition> partitions =
+ hive.getAllPartitionsOf(hive.getTable(replicatedDbName + ".t1"));
+ List<String> paths = partitions.stream().map(p -> p.getDataLocation().toUri().getPath())
+ .collect(Collectors.toList());
+
+ tuple = primary
+ .run("alter table t1 drop partition (country='india')")
+ .run("alter table t1 drop partition (country='us')")
+ .dump(primaryDbName, tuple.lastReplicationId);
+
+ replica.load(replicatedDbName, tuple.dumpLocation)
+ .run("select * From t1")
+ .verifyResults(new String[] {})
+ .verifyReplTargetProperty(replicatedDbName);
+
+ for (String path : paths) {
+ assertTrue(replica.miniDFSCluster.getFileSystem().exists(new Path(path)));
+ }
+ }
+
+ @Test
+ public void bootstrapExternalTablesDuringIncrementalPhase() throws Throwable {
+ List<String> loadWithClause = externalTableBasePathWithClause();
+ List<String> dumpWithClause
+ = Arrays.asList("'" + HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname + "'='false'");
+
+ WarehouseInstance.Tuple tuple = primary
+ .run("use " + primaryDbName)
+ .run("create external table t1 (id int)")
+ .run("insert into table t1 values (1)")
+ .run("insert into table t1 values (2)")
+ .run("create external table t2 (place string) partitioned by (country string)")
+ .run("insert into table t2 partition(country='india') values ('bangalore')")
+ .run("insert into table t2 partition(country='us') values ('austin')")
+ .run("insert into table t2 partition(country='france') values ('paris')")
+ .dump(primaryDbName, null, dumpWithClause);
+
+ // the _external_tables_file info should only be created if external tables are to be replicated, not otherwise
+ assertFalse(primary.miniDFSCluster.getFileSystem()
+ .exists(new Path(new Path(tuple.dumpLocation, primaryDbName.toLowerCase()), FILE_NAME)));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .status(replicatedDbName)
+ .verifyResult(tuple.lastReplicationId)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyFailure(new String[] {"t1" })
+ .run("show tables like 't2'")
+ .verifyFailure(new String[] {"t2" })
+ .verifyReplTargetProperty(replicatedDbName);
+
+ dumpWithClause = Arrays.asList("'" + HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname + "'='true'",
+ "'" + HiveConf.ConfVars.REPL_BOOTSTRAP_EXTERNAL_TABLES.varname + "'='true'",
+ "'" + HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE.varname + "'='false'");
+ tuple = primary.run("use " + primaryDbName)
+ .run("drop table t1")
+ .run("create external table t3 (id int)")
+ .run("insert into table t3 values (10)")
+ .run("insert into table t3 values (20)")
+ .run("create table t4 as select * from t3")
+ .dump(primaryDbName, tuple.lastReplicationId, dumpWithClause);
+
+ // the _external_tables_file info should be created as external tables are to be replicated.
+ assertTrue(primary.miniDFSCluster.getFileSystem()
+ .exists(new Path(tuple.dumpLocation, FILE_NAME)));
+
+ // verify that the external table info is written correctly for incremental
+ assertExternalFileInfo(Arrays.asList("t2", "t3"),
+ new Path(tuple.dumpLocation, FILE_NAME));
+
+ // _bootstrap directory should be created as bootstrap enabled on external tables.
+ Path dumpPath = new Path(tuple.dumpLocation, INC_BOOTSTRAP_ROOT_DIR_NAME);
+ assertTrue(primary.miniDFSCluster.getFileSystem().exists(dumpPath));
+
+ // _bootstrap/<dbName>/t2
+ // _bootstrap/<dbName>/t3
+ Path dbPath = new Path(dumpPath, primaryDbName);
+ Path tblPath = new Path(dbPath, "t2");
+ assertTrue(primary.miniDFSCluster.getFileSystem().exists(tblPath));
+ tblPath = new Path(dbPath, "t3");
+ assertTrue(primary.miniDFSCluster.getFileSystem().exists(tblPath));
+
+ replica.load(replicatedDbName, tuple.dumpLocation, loadWithClause)
+ .status(replicatedDbName)
+ .verifyResult(tuple.lastReplicationId)
+ .run("use " + replicatedDbName)
+ .run("show tables like 't1'")
+ .verifyFailure(new String[] {"t1" })
+ .run("show tables like 't2'")
+ .verifyResult("t2")
+ .run("show tables like 't3'")
+ .verifyResult("t3")
+ .run("show tables like 't4'")
+ .verifyResult("t4")
+ .verifyReplTargetProperty(replicatedDbName);
+
+ // Ckpt should be set on bootstrapped tables.
+ replica.verifyIfCkptSetForTables(replicatedDbName, Arrays.asList("t2", "t3"), tuple.dumpLocation);
+
+ // Drop source tables to see if target points to correct data or not after bootstrap load.
+ primary.run("use " + primaryDbName)
+ .run("drop table t2")
+ .run("drop table t3");
+
+ // Create table event for t4 should be applied along with bootstrapping of t2 and t3
+ replica.run("use " + replicatedDbName)
+ .run("select place from t2 where country = 'us'")
+ .verifyResult("austin")
+ .run("select place from t2 where country = 'france'")
+ .verifyResult("paris")
+ .run("select id from t3 order by id")
+ .verifyResults(Arrays.asList("10", "20"))
+ .run("select id from t4 order by id")
+ .verifyResults(Arrays.asList("10", "20"))
+ .verifyReplTargetProperty(replicatedDbName);
+ }
+
+ @Test
+ public void testExternalTablesIncReplicationWithConcurrentDropTable() throws Throwable {
+ List<String> dumpWithClause = Collections.singletonList(
+ "'" + HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname + "'='true'"
+ );
+ List<String> loadWithClause = externalTableBasePathWithClause();
+ WarehouseInstance.Tuple tupleBootstrap = primary.run("use " + primaryDbName)
+ .run("create external table t1 (id int)")
+ .run("insert into table t1 values (1)")
+ .dump(primaryDbName, null, dumpWithClause);
+
+ replica.load(replicatedDbName, tupleBootstrap.dumpLocation, loadWithClause);
+
+ // Insert a row into "t1" and create another external table using data from "t1".
+ primary.run("use " + primaryDbName)
+ .run("insert into table t1 values (2)")
+ .run("create external table t2 as select * from t1");
+
+ // Inject a behavior so that getTable returns null for table "t1". This ensures the table is
+ // skipped for data files listing.
+ BehaviourInjection<Table, Table> tableNuller = new BehaviourInjection<Table, Table>() {
+ @Nullable
+ @Override
+ public Table apply(@Nullable Table table) {
+ LOG.info("Performing injection on table " + table.getTableName());
+ if (table.getTableName().equalsIgnoreCase("t1")){
+ injectionPathCalled = true;
+ return null;
+ } else {
+ nonInjectedPathCalled = true;
+ return table;
+ }
+ }
+ };
+ InjectableBehaviourObjectStore.setGetTableBehaviour(tableNuller);
+ WarehouseInstance.Tuple tupleInc;
+ try {
+ // The t1 table will be skipped from data location listing.
+ tupleInc = primary.dump(primaryDbName, tupleBootstrap.lastReplicationId, dumpWithClause);
+ tableNuller.assertInjectionsPerformed(true, true);
+ } finally {
+ InjectableBehaviourObjectStore.resetGetTableBehaviour(); // reset the behaviour
+ }
+
+ // The data location list file should not exist, as only metadata is dumped for external tables.
+ assertFalseExternalFileInfo(new Path(tupleInc.dumpLocation, FILE_NAME));
+
+ // As this is metadata-only replication, no data is replicated: table t1 misses the newly
+ // inserted row "2", and table t2 exists at the target but is empty.
+ replica.load(replicatedDbName, tupleInc.dumpLocation, loadWithClause)
+ .run("use " + replicatedDbName)
+ .run("select id from t1 order by id")
+ .verifyResult(null)
+ .run("select id from t2 order by id")
+ .verifyResults(new String[]{});
+ }
+
+ @Test
+ public void testIncrementalDumpEmptyDumpDirectory() throws Throwable {
+ List<String> loadWithClause = externalTableBasePathWithClause();
+ List<String> dumpWithClause = Collections.singletonList(
+ "'" + HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES.varname + "'='true'"
+ );
+ WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName)
+ .run("create external table t1 (id int)")
+ .run("insert into table t1 values (1)")
+ .run("insert into table t1 values (2)")
+ .dump(primaryDbName, null, dumpWithClause);
+
+ replica.load(replicatedDbName, tuple.dumpLocation)
+ .status(replicatedDbName)
+ .verifyResult(tuple.lastReplicationId);
+
+ // This looks like an empty dump but it has the ALTER TABLE event created by the previous
+ // dump. We need it here so that the next dump won't have any events.
+ WarehouseInstance.Tuple incTuple = primary.dump(primaryDbName, tuple.lastReplicationId, dumpWithClause);
+ replica.load(replicatedDbName, incTuple.dumpLocation, loadWithClause)
+ .status(replicatedDbName)
+ .verifyResult(incTuple.lastReplicationId);
+
+ // create events for some other database and then dump the primaryDbName to dump an empty directory.
+ primary.run("create database " + extraPrimaryDb + " WITH DBPROPERTIES ( '" +
+ SOURCE_OF_REPLICATION + "' = '1,2,3')");
+ WarehouseInstance.Tuple inc2Tuple = primary.run("use " + extraPrimaryDb)
+ .run("create table tbl (fld int)")
+ .run("use " + primaryDbName)
+ .dump(primaryDbName, incTuple.lastReplicationId, dumpWithClause);
+ Assert.assertEquals(primary.getCurrentNotificationEventId().getEventId(),
+ Long.valueOf(inc2Tuple.lastReplicationId).longValue());
+
+ // Incremental load to existing database with empty dump directory should set the repl id to the last event at src.
+ replica.load(replicatedDbName, inc2Tuple.dumpLocation, loadWithClause)
+ .status(replicatedDbName)
+ .verifyResult(inc2Tuple.lastReplicationId);
+ }
+
+ private List<String> externalTableBasePathWithClause() throws IOException, SemanticException {
+ return ReplicationTestUtils.externalTableBasePathWithClause(REPLICA_EXTERNAL_BASE, replica);
+ }
+
+ private void assertFalseExternalFileInfo(Path externalTableInfoFile)
+ throws IOException {
+ DistributedFileSystem fileSystem = primary.miniDFSCluster.getFileSystem();
+ Assert.assertFalse(fileSystem.exists(externalTableInfoFile));
+ }
+
+ private void assertExternalFileInfo(List<String> expected, Path externalTableInfoFile)
+ throws IOException {
+ ReplicationTestUtils.assertExternalFileInfo(primary, expected, externalTableInfoFile);
+ }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
index 1815824b5f..0215a39c79 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
-import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 608ec04fe7..dc4bf41113 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -430,6 +430,7 @@ minillap.query.files=acid_bucket_pruning.q,\
orc_ppd_schema_evol_3a.q,\
global_limit.q,\
dynamic_partition_pruning_2.q,\
+ dynpart_cast.q,\
results_cache_diff_fs.q,\
tez_union_dynamic_partition.q,\
tez_union_dynamic_partition_2.q,\
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/package-info.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/package-info.java
deleted file mode 100644
index 46aacf8213..0000000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package consisting the program LlapServiceDriver (and other classes used by it) which is starting up the llap daemon.
- */
-package org.apache.hadoop.hive.llap.cli.service;
-
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/package-info.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/package-info.java
deleted file mode 100644
index 79cadc7cb0..0000000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package consisting the program LlapStatusServiceDriver (and other classes used by it)
- * which is monitoring if Llap is running.
- */
-package org.apache.hadoop.hive.llap.cli.status;
-
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 1cba7cb599..0296a3d74e 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -174,6 +174,7 @@ KW_OUTPUTDRIVER: 'OUTPUTDRIVER';
KW_ENABLE: 'ENABLE' | 'ENABLED';
KW_DISABLE: 'DISABLE' | 'DISABLED';
KW_EXECUTED: 'EXECUTED';
+KW_EXECUTE: 'EXECUTE';
KW_LOCATION: 'LOCATION';
KW_TABLESAMPLE: 'TABLESAMPLE';
KW_BUCKET: 'BUCKET';
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index b539d76b17..949e57b9ce 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -455,6 +455,7 @@ TOK_INPUTFORMAT;
TOK_WITHIN_GROUP;
TOK_CRON;
TOK_EXECUTED_AS;
+TOK_EXECUTE;
TOK_SCHEDULE;
TOK_EVERY;
}
@@ -2128,6 +2129,7 @@ alterScheduledQueryChange
| executedAsSpec
| enableSpecification
| definedAsSpec
+ | KW_EXECUTE -> ^(TOK_EXECUTE)
;
scheduleSpec
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index e154485463..262afaa361 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -865,7 +865,7 @@ nonReserved
| KW_TIMESTAMPTZ
| KW_DEFAULT
| KW_REOPTIMIZATION
- | KW_EXECUTED | KW_SCHEDULED | KW_CRON | KW_EVERY | KW_AT
+ | KW_EXECUTED | KW_SCHEDULED | KW_CRON | KW_EVERY | KW_AT | KW_EXECUTE
| KW_RESOURCE | KW_PLAN | KW_PLANS | KW_QUERY_PARALLELISM | KW_ACTIVATE | KW_MOVE | KW_DO
| KW_POOL | KW_ALLOC_FRACTION | KW_SCHEDULING_POLICY | KW_PATH | KW_MAPPING | KW_WORKLOAD | KW_MANAGEMENT | KW_ACTIVE | KW_UNMANAGED
| KW_UNKNOWN
diff --git a/pom.xml b/pom.xml
index 2947a2928e..ee7ca0af6e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -150,7 +150,7 @@
3.1.0
0.1.2
0.14.0-incubating
- 1.2.0-3f79e055
+ 1.6.0.1
19.0
2.4.11
1.3.166
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/package-info.java
deleted file mode 100644
index 6bb7206860..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database set location DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.alter.location;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/owner/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/owner/package-info.java
deleted file mode 100644
index f1f70f9e9d..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/owner/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database set owner DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.alter.owner;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/package-info.java
deleted file mode 100644
index 1c78014377..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Database DDL operations. */
-package org.apache.hadoop.hive.ql.ddl.database.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/poperties/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/poperties/package-info.java
deleted file mode 100644
index f1c07ebbaa..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/poperties/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database set properties DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.alter.poperties;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/package-info.java
deleted file mode 100644
index 899c69163c..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database creation DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/package-info.java
deleted file mode 100644
index be6924913f..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database description DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.desc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/package-info.java
deleted file mode 100644
index 32ab111de4..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database dropping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/package-info.java
deleted file mode 100644
index 8777742953..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database locking DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.lock;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java
deleted file mode 100644
index 53d733aaf7..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.database;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/show/package-info.java
deleted file mode 100644
index a582cc7183..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show databases DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/showcreate/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/showcreate/package-info.java
deleted file mode 100644
index e42cbacdea..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/showcreate/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show create database DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.showcreate;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/unlock/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/unlock/package-info.java
deleted file mode 100644
index d607074f53..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/unlock/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database unlocking DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.unlock;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/use/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/use/package-info.java
deleted file mode 100644
index 5c9b64e20a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/use/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Database switching DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.database.use;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/package-info.java
deleted file mode 100644
index ebfa2d621a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Function creation DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/package-info.java
deleted file mode 100644
index 7cddc52d6f..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Function describing DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.desc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/drop/package-info.java
deleted file mode 100644
index 6a2904edaf..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Function dropping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/package-info.java
deleted file mode 100644
index 30094f6d25..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Macro creation DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.macro.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/drop/package-info.java
deleted file mode 100644
index 887acb070a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Macro dropping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.macro.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/package-info.java
deleted file mode 100644
index c16607d69c..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Function related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.function;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/reload/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/reload/package-info.java
deleted file mode 100644
index 51fd37b49b..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/reload/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Function reloading DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.reload;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/show/package-info.java
deleted file mode 100644
index d69d2b19ce..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Showing functions DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.function.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/conf/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/conf/package-info.java
deleted file mode 100644
index 9983bab740..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/conf/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show Configuration operation. */
-package org.apache.hadoop.hive.ql.ddl.misc.conf;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/package-info.java
deleted file mode 100644
index 01b9762ebf..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Remove the flag from db property if its already present operation. */
-package org.apache.hadoop.hive.ql.ddl.misc.flags;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/hooks/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/hooks/package-info.java
deleted file mode 100644
index 7ef660282e..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/hooks/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Insert commit hook DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.misc.hooks;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/package-info.java
deleted file mode 100644
index d40666a81a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Cache metadata DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.misc.metadata;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/package-info.java
deleted file mode 100644
index 9371c3e9ca..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Msck DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.misc.msck;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java
deleted file mode 100644
index 9b7f261e59..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** DDL operation descriptions and operations which can not be classified elsewhere. */
-package org.apache.hadoop.hive.ql.ddl.misc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java
deleted file mode 100644
index 9e79c3678d..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** DDL operations. */
-package org.apache.hadoop.hive.ql.ddl;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/grant/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/grant/package-info.java
deleted file mode 100644
index e08339bd68..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/grant/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Granting DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.grant;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/package-info.java
deleted file mode 100644
index 596a803704..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Privilege related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.privilege;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/revoke/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/revoke/package-info.java
deleted file mode 100644
index 5e37b6951d..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/revoke/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Revoking DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.revoke;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/create/package-info.java
deleted file mode 100644
index 6e958d7397..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Role creation DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/drop/package-info.java
deleted file mode 100644
index 37f9eae3c9..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Role dropping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/grant/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/grant/package-info.java
deleted file mode 100644
index 68b215a7c8..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/grant/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Granting to role DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.grant;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/revoke/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/revoke/package-info.java
deleted file mode 100644
index 89213a6daa..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/revoke/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Revoking from role DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.revoke;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/set/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/set/package-info.java
deleted file mode 100644
index 9223f8275e..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/set/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Setting role DDL operation.
- */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.set;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/show/package-info.java
deleted file mode 100644
index cb4f169fe6..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/role/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Role showing DDL operations. */
-package org.apache.hadoop.hive.ql.ddl.privilege.role.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/grant/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/grant/package-info.java
deleted file mode 100644
index e5beecafc8..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/grant/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Grant showing DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.show.grant;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/principals/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/principals/package-info.java
deleted file mode 100644
index 0e27d31b5b..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/principals/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Principal showing DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.show.principals;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/rolegrant/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/rolegrant/package-info.java
deleted file mode 100644
index ccef832a89..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/show/rolegrant/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Role grant showing DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.privilege.show.rolegrant;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/package-info.java
deleted file mode 100644
index dc78d2cacf..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Abort Transactions DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.process.abort;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/package-info.java
deleted file mode 100644
index 3b4d443280..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Kill Queries DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.process.kill;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java
deleted file mode 100644
index 07fbccc2df..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Process related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.process;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/package-info.java
deleted file mode 100644
index 0255f77f3a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show Compactions DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.process.show.compactions;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/transactions/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/transactions/package-info.java
deleted file mode 100644
index 27151e8859..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/transactions/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show Transactions DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.process.show.transactions;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/add/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/add/package-info.java
deleted file mode 100644
index 3c94b36545..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/add/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Add columns DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.column.add;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/change/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/change/package-info.java
deleted file mode 100644
index fc08eef57d..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/change/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Change columns DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.column.change;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/replace/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/replace/package-info.java
deleted file mode 100644
index dd75ddaf50..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/replace/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Replace columns DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.column.replace;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/package-info.java
deleted file mode 100644
index e911352d25..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show columns DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.column.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/update/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/update/package-info.java
deleted file mode 100644
index c330aebac5..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/update/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Update columns DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.column.update;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/package-info.java
deleted file mode 100644
index dda814774d..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Add constraint DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.constraint.add;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/package-info.java
deleted file mode 100644
index 171c41d8ae..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop constraint DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.constraint.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/package-info.java
deleted file mode 100644
index 0c663bcb90..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Table constraint related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.constraint;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/like/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/like/package-info.java
deleted file mode 100644
index fa57c92183..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/like/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create table like DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.create.like;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/package-info.java
deleted file mode 100644
index 6006350daf..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create table DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/package-info.java
deleted file mode 100644
index db74e82671..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show create table DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.create.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/package-info.java
deleted file mode 100644
index 31c1fef932..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop table DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java
deleted file mode 100644
index 2b21cd95b8..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Table info related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.info;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java
deleted file mode 100644
index 2daf50d4c9..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Table locking related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.lock;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java
deleted file mode 100644
index 70967b4ec2..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Miscellaneous table related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.misc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java
deleted file mode 100644
index 6fc4730749..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Table related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java
deleted file mode 100644
index e117e53752..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Add partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.add;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java
deleted file mode 100644
index 9a108e5770..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java
deleted file mode 100644
index d713305513..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java
deleted file mode 100644
index 122bbdf614..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Exchange partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.exchange;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java
deleted file mode 100644
index 180e7053ad..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Partition related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.partition;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java
deleted file mode 100644
index cebcf9cdf8..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Rename partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.rename;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java
deleted file mode 100644
index d49c1e25c2..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show partition DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.table.partition.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java
deleted file mode 100644
index 082b87d9dd..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Storage related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.table.storage;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java
deleted file mode 100644
index a80d2cb3c8..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/drop/package-info.java
deleted file mode 100644
index 2664ee679c..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/package-info.java
deleted file mode 100644
index a982b10bc6..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Rebuild materialized view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rebuild;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/package-info.java
deleted file mode 100644
index 9f996d4105..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Rewrite materialized view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rewrite;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/drop/package-info.java
deleted file mode 100644
index 324a8775a1..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop materialized view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.materialized.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/package-info.java
deleted file mode 100644
index e2cd516aa7..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Update materialized view DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.view.materialized.update;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java
deleted file mode 100644
index 5908b420b2..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** View related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.view;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java
deleted file mode 100644
index bee2b391ff..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter WM Mapping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java
deleted file mode 100644
index 12c69f67a0..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create WM Mapping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java
deleted file mode 100644
index c10903e227..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop WM Mapping DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java
deleted file mode 100644
index 2d85cef902..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** WM Mapping DDL operations. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java
deleted file mode 100644
index 8e314243d0..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Workload Management related DDL operation descriptions and operations. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java
deleted file mode 100644
index 723774b9f2..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Pool DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java
deleted file mode 100644
index 5447711758..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create Pool DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java
deleted file mode 100644
index ca51de30fc..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop Pool DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java
deleted file mode 100644
index 4397c985a7..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Disable Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.disable;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java
deleted file mode 100644
index 6631ad7e1c..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Enable Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java
deleted file mode 100644
index 5bb28485d6..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Resource Plan DDL operations. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java
deleted file mode 100644
index 2aeb183dab..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Resource Plan Rename DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.rename;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java
deleted file mode 100644
index 40607f89fc..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Replace Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.replace;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java
deleted file mode 100644
index f1fdebceef..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Resource Plan Set DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.set;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java
deleted file mode 100644
index 754f46a265..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Resource Plan Unset DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.unset;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java
deleted file mode 100644
index f998608f2e..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Resource Plan Validate DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.validate;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java
deleted file mode 100644
index 3b4ab0e973..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java
deleted file mode 100644
index 48905485fd..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java
deleted file mode 100644
index 80918f1326..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Show Resource Plan DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.show;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java
deleted file mode 100644
index 1f39b17e8f..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Alter Trigger DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java
deleted file mode 100644
index 3f72467d5e..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create Trigger DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java
deleted file mode 100644
index a8fabfb992..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Create Trigger DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java
deleted file mode 100644
index 707cfca3da..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Trigger DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java
deleted file mode 100644
index 66268fe556..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Add Trigger to Pool DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.add;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java
deleted file mode 100644
index 6a2b6b4ee1..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Drop Trigger from Pool DDL operation. */
-package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.drop;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 622433bb10..e92cd38007 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -172,7 +172,8 @@ private boolean shouldExamineTablesToDump() {
*/
private boolean shouldDumpExternalTableLocation() {
return conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES)
- && !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
+ && (!conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) &&
+ !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE));
}
/**
@@ -493,7 +494,8 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb)
Exception caught = null;
boolean shouldWriteExternalTableLocationInfo =
conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES)
- && !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
+ && (!conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) &&
+ !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE));
try (Writer writer = new Writer(dbRoot, conf)) {
for (String tblName : Utils.matchesTbl(hiveDb, dbName, work.replScope)) {
LOG.debug("Dumping table: " + tblName + " to db root " + dbRoot.toUri());
@@ -583,6 +585,7 @@ void dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot,
tuple.replicationSpec.setCurrentReplicationState(String.valueOf(lastReplId));
}
MmContext mmCtx = MmContext.createIfNeeded(tableSpec.tableHandle);
+ tuple.replicationSpec.setRepl(true);
new TableExport(
exportPaths, tableSpec, tuple.replicationSpec, hiveDb, distCpDoAsUser, conf, mmCtx).write();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
index 4c504be894..c7aa0077a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
@@ -106,7 +106,8 @@ public static Path externalTableDataPath(HiveConf hiveConf, Path basePath, Path
this.hiveConf = hiveConf;
writePath = new Path(dbRoot, FILE_NAME);
includeExternalTables = hiveConf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES);
- dumpMetadataOnly = hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
+ dumpMetadataOnly = hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
+ hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE);
if (shouldWrite()) {
this.writer = FileSystem.get(hiveConf).create(writePath);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/schq/ScheduledQueryMaintenanceTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/schq/ScheduledQueryMaintenanceTask.java
index 3d46b18063..fd0c173ea4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/schq/ScheduledQueryMaintenanceTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/schq/ScheduledQueryMaintenanceTask.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService;
import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryMaintenanceWork;
import org.apache.thrift.TException;
@@ -45,6 +46,9 @@ public int execute() {
ScheduledQueryMaintenanceRequest request = buildScheduledQueryRequest();
try {
Hive.get().getMSC().scheduledQueryMaintenance(request);
+ if (work.getScheduledQuery().isSetNextExecution()) {
+ ScheduledQueryExecutionService.forceScheduleCheck();
+ }
} catch (TException | HiveException e) {
setException(e);
LOG.error("Failed", e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/package-info.java
deleted file mode 100644
index f723698ffa..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Operator converter for the return path. */
-package org.apache.hadoop.hive.ql.optimizer.calcite.translator.opconventer;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 276f759a7e..810a4c5284 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -49,12 +49,12 @@
import java.util.List;
import java.util.Map;
-import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Reader;
-import static org.apache.hadoop.hive.ql.exec.repl.ExternalTableCopyTaskBuilder.DirCopyWork;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEQUERYID;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_ENABLE_MOVE_OPTIMIZATION;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_MOVE_OPTIMIZED_FILE_SCHEMES;
+import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Reader;
+import static org.apache.hadoop.hive.ql.exec.repl.ExternalTableCopyTaskBuilder.DirCopyWork;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DBNAME;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_FROM;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_LIMIT;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index ad3e55a169..8313d99c58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -51,6 +51,8 @@
private boolean isMigratingToTxnTable = false;
private boolean isMigratingToExternalTable = false;
private boolean needDupCopyCheck = false;
+ //Determine if replication is done using repl or export-import
+ private boolean isRepl = false;
// Key definitions related to replication.
public enum KEY {
@@ -438,4 +440,12 @@ public void setNeedDupCopyCheck(boolean isFirstIncPending) {
// Check HIVE-21197 for more detail.
this.needDupCopyCheck = isFirstIncPending;
}
+
+ public boolean isRepl() {
+ return isRepl;
+ }
+
+ public void setRepl(boolean repl) {
+ isRepl = repl;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
index 7e78acaee7..66b4653b24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
@@ -91,6 +91,8 @@ private ScheduledQuery fillScheduledQuery(ScheduledQueryMaintenanceRequestType
// in case the user will change; we have to run an authorization check beforehand
checkAuthorization(type, schqStored);
}
+ // clear the next execution time
+ schqStored.setNextExecutionIsSet(false);
return composeOverlayObject(schqChanges, schqStored);
} catch (TException e) {
throw new SemanticException("unable to get Scheduled query" + e);
@@ -186,6 +188,10 @@ private void processScheduledQueryAstNode(ScheduledQuery schq, ASTNode node) thr
case HiveParser.TOK_QUERY:
schq.setQuery(unparseTree(node.getChild(0)));
return;
+ case HiveParser.TOK_EXECUTE:
+ int now = (int) (System.currentTimeMillis() / 1000);
+ schq.setNextExecution(now);
+ return;
default:
throw new SemanticException("Unexpected token: " + node.getType());
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index c2514eedb1..33d3beba46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.ql.parse;
import static java.util.Objects.nonNull;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONCONVERT;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
import java.io.FileNotFoundException;
@@ -7406,7 +7407,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
input = genConstraintsPlan(dest, qb, input);
if (!qb.getIsQuery()) {
- input = genConversionSelectOperator(dest, qb, input, tableDescriptor, dpCtx);
+ input = genConversionSelectOperator(dest, qb, input, tableDescriptor, dpCtx, parts);
}
if (destinationTable.isMaterializedView() &&
@@ -7535,7 +7536,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
input = genConstraintsPlan(dest, qb, input);
if (!qb.getIsQuery()) {
- input = genConversionSelectOperator(dest, qb, input, tableDescriptor, dpCtx);
+ input = genConversionSelectOperator(dest, qb, input, tableDescriptor, dpCtx, null);
}
if (destinationTable.isMaterializedView() &&
@@ -8401,7 +8402,8 @@ These props are now enabled elsewhere (see commit diffs). It would be better in
* types that are expected by the table_desc.
*/
private Operator genConversionSelectOperator(String dest, QB qb, Operator input,
- TableDesc table_desc, DynamicPartitionCtx dpCtx) throws SemanticException {
+ TableDesc table_desc, DynamicPartitionCtx dpCtx, List parts)
+ throws SemanticException {
StructObjectInspector oi = null;
try {
Deserializer deserializer = table_desc.getDeserializerClass()
@@ -8483,18 +8485,51 @@ private Operator genConversionSelectOperator(String dest, QB qb, Operator input,
}
expressions.add(column);
}
- }
- // deal with dynamic partition columns: convert ExprNodeDesc type to String??
- if (dynPart && dpCtx != null && dpCtx.getNumDPCols() > 0) {
- // DP columns starts with tableFields.size()
- for (int i = tableFields.size() + (updating(dest) ? 1 : 0); i < rowFields.size(); ++i) {
- TypeInfo rowFieldTypeInfo = rowFields.get(i).getType();
- ExprNodeDesc column = new ExprNodeColumnDesc(
- rowFieldTypeInfo, rowFields.get(i).getInternalName(), "", true);
- expressions.add(column);
+ // deal with dynamic partition columns
+ if (dynPart && dpCtx != null && dpCtx.getNumDPCols() > 0) {
+ // rowFields contains non-partitioned columns (tableFields) followed by DP columns
+ int rowFieldsOffset = tableFields.size() + (updating(dest) ? 1 : 0);
+ for (int dpColIdx = 0; dpColIdx < rowFields.size() - rowFieldsOffset; ++dpColIdx) {
+
+ // create ExprNodeDesc
+ ColumnInfo inputColumn = rowFields.get(dpColIdx + rowFieldsOffset);
+ TypeInfo inputTypeInfo = inputColumn.getType();
+ ExprNodeDesc column =
+ new ExprNodeColumnDesc(inputTypeInfo, inputColumn.getInternalName(), "", true);
+
+ // Cast input column to destination column type if necessary.
+ if (conf.getBoolVar(DYNAMICPARTITIONCONVERT)) {
+ if (parts != null && !parts.isEmpty()) {
+ String destPartitionName = dpCtx.getDPColNames().get(dpColIdx);
+ FieldSchema destPartitionFieldSchema = parts.stream()
+ .filter(dynamicPartition -> dynamicPartition.getName().equals(destPartitionName))
+ .findFirst().orElse(null);
+ if (destPartitionFieldSchema == null) {
+            throw new IllegalStateException("Partition schema for dynamic partition " +
+                destPartitionName + " not found in the destination table's partition columns.");
+ }
+ String partitionType = destPartitionFieldSchema.getType();
+ if (partitionType == null) {
+            throw new IllegalStateException("Couldn't get FieldSchema for partition " +
+                destPartitionFieldSchema.getName());
+ }
+ PrimitiveTypeInfo partitionTypeInfo =
+ TypeInfoFactory.getPrimitiveTypeInfo(partitionType);
+ if (!partitionTypeInfo.equals(inputTypeInfo)) {
+ column = ExprNodeTypeCheck.getExprNodeDefaultExprProcessor()
+ .createConversionCast(column, partitionTypeInfo);
+ converted = true;
+ }
+ } else {
+ LOG.warn("Partition schema for dynamic partition " + inputColumn.getAlias() + " ("
+ + inputColumn.getInternalName() + ") not found in DynamicPartitionCtx. "
+ + "This is expected with a CTAS.");
+ }
+ }
+ expressions.add(column);
+ }
}
- // converted = true; // [TODO]: should we check & convert type to String and set it to true?
}
if (converted) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
index d01e24c385..f9648c8961 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
@@ -57,7 +57,7 @@ public HiveWrapper(Hive db, String dbName, long lastReplId) {
public Tuple table(final String tableName, HiveConf conf) throws HiveException {
// Column statistics won't be accurate if we are dumping only metadata
- boolean getColStats = !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
+ boolean getColStats = !Utils.shouldDumpMetaDataOnly(db.getTable(dbName, tableName), conf);
return new Tuple<>(functionForSpec, () -> db.getTable(dbName, tableName, true, false,
getColStats));
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index 01b7fdc4b6..338a2f39f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -72,8 +73,8 @@ public TableExport(Paths paths, TableSpec tableSpec, ReplicationSpec replication
? null
: tableSpec;
this.replicationSpec = replicationSpec;
- if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
- (this.tableSpec != null && this.tableSpec.tableHandle.isView())) {
+ if (this.tableSpec != null && this.tableSpec.tableHandle.isView() ||
+ Utils.shouldDumpMetaDataOnly(tableSpec.tableHandle, conf)) {
this.replicationSpec.setIsMetadataOnly(true);
this.tableSpec.tableHandle.setStatsStateLikeNewTable();
@@ -92,7 +93,8 @@ public boolean write() throws SemanticException {
} else if (shouldExport()) {
PartitionIterable withPartitions = getPartitions();
writeMetaData(withPartitions);
- if (!replicationSpec.isMetadataOnly()) {
+ if (!replicationSpec.isMetadataOnly()
+ && !(replicationSpec.isRepl() && tableSpec.tableHandle.getTableType().equals(TableType.EXTERNAL_TABLE))) {
writeData(withPartitions);
}
return true;
@@ -158,10 +160,8 @@ private void writeData(PartitionIterable partitions) throws SemanticException {
} else {
List dataPathList = Utils.getDataPathList(tableSpec.tableHandle.getDataLocation(),
replicationSpec, conf);
-
- // this is the data copy
new FileOperations(dataPathList, paths.dataExportDir(), distCpDoAsUser, conf, mmCtx)
- .export(replicationSpec);
+ .export(replicationSpec);
}
} catch (Exception e) {
throw new SemanticException(e.getMessage(), e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index bc9f06dfa9..6f8912b5f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.repl.ReplScope;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -270,4 +271,11 @@ public static boolean shouldReplicate(NotificationEvent tableForEvent,
return Collections.singletonList(fromPath);
}
}
+
+ public static boolean shouldDumpMetaDataOnly(Table table, HiveConf conf) {
+ return conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
+ (conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES) &&
+ table.getTableType().equals(TableType.EXTERNAL_TABLE) &&
+ conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE));
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
index 0168240829..aedf69870a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
@@ -229,7 +229,7 @@ public void handle(Context withinContext) throws Exception {
// If we are not dumping metadata about a table, we shouldn't be dumping basic statistics
// as well, since that won't be accurate. So reset them to what they would look like for an
// empty table.
- if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+ if (Utils.shouldDumpMetaDataOnly(qlMdTableAfter, withinContext.hiveConf)) {
qlMdTableAfter.setStatsStateLikeNewTable();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
index 837d51c8c8..355374aeb8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
@@ -19,7 +19,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -66,7 +65,7 @@ public void handle(Context withinContext) throws Exception {
// If we are not dumping data about a table, we shouldn't be dumping basic statistics
// as well, since that won't be accurate. So reset them to what they would look like for an
// empty table.
- if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+ if (Utils.shouldDumpMetaDataOnly(qlMdTable, withinContext.hiveConf)) {
qlMdTable.setStatsStateLikeNewTable();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
index 717a4520df..06cfe3f38e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
@@ -22,6 +22,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -44,17 +45,26 @@
private static final Logger LOG = LoggerFactory.getLogger(ScheduledQueryExecutionService.class);
+ private static ScheduledQueryExecutionService INSTANCE = null;
+
private ScheduledQueryExecutionContext context;
private ScheduledQueryExecutor worker;
+ private AtomicInteger forcedScheduleCheckCounter = new AtomicInteger();
public static ScheduledQueryExecutionService startScheduledQueryExecutorService(HiveConf conf0) {
- HiveConf conf = new HiveConf(conf0);
- MetastoreBasedScheduledQueryService qService = new MetastoreBasedScheduledQueryService(conf);
- ExecutorService executor =
- Executors.newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Scheduled Query Thread %d").build());
- ScheduledQueryExecutionContext ctx = new ScheduledQueryExecutionContext(executor, conf, qService);
- return new ScheduledQueryExecutionService(ctx);
+ synchronized (ScheduledQueryExecutionService.class) {
+ if (INSTANCE != null) {
+ throw new IllegalStateException(
+            "There is already a ScheduledQueryExecutionService in service; check it and close it explicitly if necessary");
+ }
+ HiveConf conf = new HiveConf(conf0);
+ MetastoreBasedScheduledQueryService qService = new MetastoreBasedScheduledQueryService(conf);
+ ExecutorService executor = Executors.newCachedThreadPool(
+ new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Scheduled Query Thread %d").build());
+ ScheduledQueryExecutionContext ctx = new ScheduledQueryExecutionContext(executor, conf, qService);
+ INSTANCE = new ScheduledQueryExecutionService(ctx);
+ return INSTANCE;
+ }
}
public ScheduledQueryExecutionService(ScheduledQueryExecutionContext ctx) {
@@ -83,7 +93,7 @@ public void run() {
}
} else {
try {
- Thread.sleep(context.getIdleSleepTime());
+ sleep(context.getIdleSleepTime());
} catch (InterruptedException e) {
LOG.warn("interrupt discarded");
}
@@ -91,6 +101,17 @@ public void run() {
}
}
+ private void sleep(long idleSleepTime) throws InterruptedException {
+    long checkIntervalMs = 1000;
+    int origResets = forcedScheduleCheckCounter.get();
+    for (long i = 0; i < idleSleepTime; i += checkIntervalMs) {
+      Thread.sleep(checkIntervalMs);
+ if (forcedScheduleCheckCounter.get() != origResets) {
+ return;
+ }
+ }
+ }
+
public synchronized void reportQueryProgress() {
if (info != null) {
LOG.info("Reporting query progress of {} as {} err:{}", info.getScheduledExecutionId(), info.getState(),
@@ -173,15 +194,27 @@ public void run() {
@VisibleForTesting
@Override
public void close() throws IOException {
- context.executor.shutdown();
- try {
- context.executor.awaitTermination(1, TimeUnit.SECONDS);
- context.executor.shutdownNow();
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
+ synchronized (ScheduledQueryExecutionService.class) {
+ if (INSTANCE == null || INSTANCE != this) {
+ throw new IllegalStateException("The current ScheduledQueryExecutionService INSTANCE is invalid");
+ }
+ INSTANCE = null;
+ context.executor.shutdown();
+ try {
+ context.executor.awaitTermination(1, TimeUnit.SECONDS);
+ context.executor.shutdownNow();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
}
+ }
-
+ public static void forceScheduleCheck() {
+ INSTANCE.forcedScheduleCheckCounter.incrementAndGet();
}
+ @VisibleForTesting
+ public static int getForcedScheduleCheckCount() {
+ return INSTANCE.forcedScheduleCheckCounter.get();
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/package-info.java
deleted file mode 100644
index 144382978a..0000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Command Authorization codes. */
-package org.apache.hadoop.hive.ql.security.authorization.command;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
index 336debf968..f2fc421925 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.ql.schq;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -32,6 +34,7 @@
import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hive.testutils.HiveTestEnvSetup;
+import org.hamcrest.Matchers;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@@ -187,7 +190,7 @@ public void testAlter() throws ParseException, Exception {
IDriver driver = createDriver();
driver.run("set role admin");
- driver.run("create scheduled query alter1 cron '* * * * * ? *' as select 1 from tu");
+ driver.run("create scheduled query alter1 cron '0 0 7 * * ? *' as select 1 from tu");
driver.run("alter scheduled query alter1 executed as 'user3'");
driver.run("alter scheduled query alter1 defined as select 22 from tu");
@@ -195,10 +198,29 @@ public void testAlter() throws ParseException, Exception {
Optional sq = os.getMScheduledQuery(new ScheduledQueryKey("alter1", "hive"));
assertTrue(sq.isPresent());
assertEquals("user3", sq.get().toThrift().getUser());
+ assertThat(sq.get().getNextExecution(), Matchers.greaterThan((int) (System.currentTimeMillis() / 1000)));
}
}
+ @Test
+ public void testExecuteImmediate() throws ParseException, Exception {
+ IDriver driver = createDriver();
+
+ driver.run("set role admin");
+ driver.run("create scheduled query immed cron '0 0 7 * * ? *' as select 1");
+ int cnt0 = ScheduledQueryExecutionService.getForcedScheduleCheckCount();
+ driver.run("alter scheduled query immed execute");
+
+ try (CloseableObjectStore os = new CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
+ Optional sq = os.getMScheduledQuery(new ScheduledQueryKey("immed", "hive"));
+ assertTrue(sq.isPresent());
+ assertThat(sq.get().getNextExecution(), Matchers.lessThanOrEqualTo((int) (System.currentTimeMillis() / 1000)));
+ int cnt1 = ScheduledQueryExecutionService.getForcedScheduleCheckCount();
+ assertNotEquals(cnt1, cnt0);
+ }
+ }
+
@Test
public void testImpersonation() throws ParseException, Exception {
HiveConf conf = env_setup.getTestCtx().hiveConf;
diff --git a/ql/src/test/queries/clientpositive/dynpart_cast.q b/ql/src/test/queries/clientpositive/dynpart_cast.q
new file mode 100644
index 0000000000..d28152522f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynpart_cast.q
@@ -0,0 +1,14 @@
+set hive.stats.autogather=true;
+
+drop table dynpart_cast;
+create table dynpart_cast (i int) PARTITIONED BY (`static_part` int, `dyn_part` int);
+
+EXPLAIN
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002';
+
+-- the stats task would fail here if the dynamic partition value were not cast to an integer, which would create directory "dyn_part=002" instead of "dyn_part=2"
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002';
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
index da3be3e5bb..ff708cb6b0 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
@@ -41,11 +41,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 10) AS STRING) (type: string), CAST( (hash(value) pmod 10) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string), CAST( _col3 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string), _col3 (type: string)
outputColumnNames: key, value, one, two, three
Statistics: Num rows: 500 Data size: 274000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -92,21 +92,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int), _col3 (type: int)
+ key expressions: _col2 (type: string), _col3 (type: string)
null sort order: aa
sort order: ++
- Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/dynpart_cast.q.out b/ql/src/test/results/clientpositive/dynpart_cast.q.out
new file mode 100644
index 0000000000..146a08fb89
--- /dev/null
+++ b/ql/src/test/results/clientpositive/dynpart_cast.q.out
@@ -0,0 +1,165 @@
+PREHOOK: query: drop table dynpart_cast
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table dynpart_cast
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table dynpart_cast (i int) PARTITIONED BY (`static_part` int, `dyn_part` int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dynpart_cast
+POSTHOOK: query: create table dynpart_cast (i int) PARTITIONED BY (`static_part` int, `dyn_part` int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dynpart_cast
+PREHOOK: query: EXPLAIN
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_cast@static_part=3
+POSTHOOK: query: EXPLAIN
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 1 (type: int), 2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+ Select Operator
+ expressions: _col0 (type: int), UDFToInteger('3') (type: int), _col1 (type: int)
+ outputColumnNames: i, static_part, dyn_part
+ Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(i, 'hll')
+ keys: static_part (type: int), dyn_part (type: int)
+ minReductionHashAggr: 0.99
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int)
+ null sort order: zz
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+ Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col2 (type: struct)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0)
+ keys: KEY._col0 (type: int), KEY._col1 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: _col2 (type: struct), _col0 (type: int), _col1 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ dyn_part
+ static_part 3
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+
+ Stage: Stage-2
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: i
+ Column Types: int
+ Table: default.dynpart_cast
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_cast@static_part=3
+POSTHOOK: query: INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@dynpart_cast@static_part=3/dyn_part=2
+POSTHOOK: Lineage: dynpart_cast PARTITION(static_part=3,dyn_part=2).i SIMPLE []
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
index 43bb789840..7811f7cbd6 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
@@ -46,21 +46,21 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
- key expressions: _col2 (type: string), _col3 (type: string), _bucket_number (type: string), _col1 (type: string)
+ key expressions: _col2 (type: string), UDFToInteger(UDFToInteger(_col3)) (type: int), _bucket_number (type: string), _col1 (type: string)
null sort order: aaaa
sort order: ++++
- Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
- Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string), UDFToInteger(UDFToInteger(_col3)) (type: int)
+ Statistics: Num rows: 2000 Data size: 732000 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: string)
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string), KEY._bucket_number (type: string)
+ expressions: VALUE._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: int), KEY._bucket_number (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _bucket_number
File Output Operator
compressed: false
Dp Sort State: PARTITION_BUCKET_SORTED
- Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 2000 Data size: 732000 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
index f745b46899..52fd083565 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
@@ -57,11 +57,11 @@ STAGE PLANS:
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), CAST( VALUE._col2 AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1000 Data size: 99000 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1000 Data size: 279000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
outputColumnNames: key, value, hr
Statistics: Num rows: 1000 Data size: 373000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -128,21 +128,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 1000 Data size: 99000 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 1000 Data size: 279000 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 1000 Data size: 99000 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1000 Data size: 279000 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_cast.q.out b/ql/src/test/results/clientpositive/llap/dynpart_cast.q.out
new file mode 100644
index 0000000000..f571961a97
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynpart_cast.q.out
@@ -0,0 +1,135 @@
+PREHOOK: query: drop table dynpart_cast
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table dynpart_cast
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table dynpart_cast (i int) PARTITIONED BY (`static_part` int, `dyn_part` int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dynpart_cast
+POSTHOOK: query: create table dynpart_cast (i int) PARTITIONED BY (`static_part` int, `dyn_part` int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dynpart_cast
+PREHOOK: query: EXPLAIN
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_cast@static_part=3
+POSTHOOK: query: EXPLAIN
+INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 1 (type: int), 2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+ Select Operator
+ expressions: _col0 (type: int), UDFToInteger('3') (type: int), _col1 (type: int)
+ outputColumnNames: i, static_part, dyn_part
+ Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(i, 'hll')
+ keys: static_part (type: int), dyn_part (type: int)
+ minReductionHashAggr: 0.0
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int)
+ null sort order: zz
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+ Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col2 (type: struct)
+ Execution mode: llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0)
+ keys: KEY._col0 (type: int), KEY._col1 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: _col2 (type: struct), _col0 (type: int), _col1 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ dyn_part
+ static_part 3
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dynpart_cast
+
+ Stage: Stage-3
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: i
+ Column Types: int
+ Table: default.dynpart_cast
+
+PREHOOK: query: INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@dynpart_cast@static_part=3
+POSTHOOK: query: INSERT INTO TABLE dynpart_cast PARTITION (static_part=03, dyn_part)
+SELECT 1,
+'002'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@dynpart_cast@static_part=3/dyn_part=2
+POSTHOOK: Lineage: dynpart_cast PARTITION(static_part=3,dyn_part=2).i SIMPLE []
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_bucketing.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_bucketing.q.out
index 453d2451df..39c7ae63ac 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_bucketing.q.out
@@ -604,14 +604,14 @@ STAGE PLANS:
alias: t1_staging
Statistics: Num rows: 51 Data size: 23008 Basic stats: PARTIAL Column stats: PARTIAL
Select Operator
- expressions: a (type: string), CAST( b AS decimal(6,0)) (type: decimal(6,0)), c (type: int), d (type: string), e (type: decimal(18,0))
+ expressions: a (type: string), CAST( b AS decimal(6,0)) (type: decimal(6,0)), c (type: int), d (type: string), CAST( e AS decimal(3,0)) (type: decimal(3,0))
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 51 Data size: 23008 Basic stats: PARTIAL Column stats: PARTIAL
Reduce Output Operator
- key expressions: _col4 (type: decimal(18,0)), _bucket_number (type: string), _col0 (type: string), _col1 (type: decimal(6,0))
+ key expressions: _col4 (type: decimal(3,0)), _bucket_number (type: string), _col0 (type: string), _col1 (type: decimal(6,0))
null sort order: aaaa
sort order: ++++
- Map-reduce partition columns: _col4 (type: decimal(18,0))
+ Map-reduce partition columns: _col4 (type: decimal(3,0))
Statistics: Num rows: 51 Data size: 23008 Basic stats: PARTIAL Column stats: PARTIAL
value expressions: _col2 (type: int), _col3 (type: string)
Execution mode: llap
@@ -620,7 +620,7 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: KEY._col0 (type: string), KEY._col1 (type: decimal(6,0)), VALUE._col2 (type: int), VALUE._col3 (type: string), KEY._col4 (type: decimal(18,0)), KEY._bucket_number (type: string)
+ expressions: KEY._col0 (type: string), KEY._col1 (type: decimal(6,0)), VALUE._col2 (type: int), VALUE._col3 (type: string), KEY._col4 (type: decimal(3,0)), KEY._bucket_number (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
File Output Operator
compressed: false
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
index 9da73e65ac..44a5d6f290 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
@@ -73,19 +73,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -211,19 +211,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1b_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -389,19 +389,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1c_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
index a6ea33493f..3bf3710c69 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
@@ -74,11 +74,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -96,11 +96,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -128,12 +128,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -222,11 +222,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -244,11 +244,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -276,12 +276,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -415,11 +415,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -437,11 +437,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -469,12 +469,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
index 9b0d3b4234..19ca90c2ad 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
@@ -48,11 +48,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 10) AS STRING) (type: string), CAST( (hash(value) pmod 10) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string), CAST( _col3 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string), _col3 (type: string)
outputColumnNames: key, value, one, two, three
Statistics: Num rows: 500 Data size: 274000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -70,11 +70,11 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 658500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col3 (type: struct), _col4 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int), _col3 (type: int)
+ key expressions: _col2 (type: string), _col3 (type: string)
null sort order: aa
sort order: ++
- Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -102,12 +102,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
index d35f44b10a..3215f09ac8 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
@@ -74,11 +74,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -96,11 +96,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -128,12 +128,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -222,11 +222,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -244,11 +244,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -276,12 +276,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -415,11 +415,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -437,11 +437,11 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 283250 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: struct), _col3 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -469,12 +469,12 @@ STAGE PLANS:
Execution mode: llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out b/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
index fcff20a68e..59f4acd59b 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
@@ -48,11 +48,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 10) AS STRING) (type: string), CAST( (hash(value) pmod 10) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string), CAST( _col3 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string), _col3 (type: string)
outputColumnNames: key, value, one, two, three
Statistics: Num rows: 500 Data size: 274000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -70,11 +70,11 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 658500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col3 (type: struct), _col4 (type: struct)
Reduce Output Operator
- key expressions: _col2 (type: int), _col3 (type: int)
+ key expressions: _col2 (type: string), _col3 (type: string)
null sort order: aa
sort order: ++
- Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -102,12 +102,12 @@ STAGE PLANS:
Execution mode: vectorized, llap
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/tez_dml.q.out b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
index 4ad78d8582..d716b63012 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
@@ -489,19 +489,19 @@ STAGE PLANS:
alias: tmp_src
Statistics: Num rows: 309 Data size: 30591 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: value (type: string), cnt (type: bigint)
+ expressions: value (type: string), UDFToInteger(cnt) (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 309 Data size: 30591 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 309 Data size: 30591 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.tmp_src_part
Select Operator
- expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
+ expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: c, d
Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
diff --git a/ql/src/test/results/clientpositive/orc_merge1.q.out b/ql/src/test/results/clientpositive/orc_merge1.q.out
index 9c07816340..8a4aade0d4 100644
--- a/ql/src/test/results/clientpositive/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge1.q.out
@@ -66,19 +66,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -185,19 +185,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1b_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -343,19 +343,19 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orcfile_merge1c_n1
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
diff --git a/ql/src/test/results/clientpositive/orc_merge10.q.out b/ql/src/test/results/clientpositive/orc_merge10.q.out
index 4a5f03c82f..0b9b6649c5 100644
--- a/ql/src/test/results/clientpositive/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge10.q.out
@@ -67,11 +67,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -118,20 +118,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -210,11 +210,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -261,20 +261,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -392,11 +392,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -443,20 +443,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/orc_merge2.q.out b/ql/src/test/results/clientpositive/orc_merge2.q.out
index d132d62b18..2997a4f272 100644
--- a/ql/src/test/results/clientpositive/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge2.q.out
@@ -41,11 +41,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 10) AS STRING) (type: string), CAST( (hash(value) pmod 10) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string), CAST( _col3 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string), _col3 (type: string)
outputColumnNames: key, value, one, two, three
Statistics: Num rows: 500 Data size: 274000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -92,20 +92,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int), _col3 (type: int)
+ key expressions: _col2 (type: string), _col3 (type: string)
null sort order: aa
sort order: ++
- Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 231500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
index 7f9a04b09f..3806c36390 100644
--- a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
@@ -67,11 +67,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -118,20 +118,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -210,11 +210,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -261,20 +261,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -392,11 +392,11 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), CAST( _col2 AS STRING) (type: string)
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
outputColumnNames: key, value, ds, part
Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -443,20 +443,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
Dp Sort State: PARTITION_SORTED
- Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
diff --git a/ql/src/test/results/clientpositive/smb_join_partition_key.q.out b/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
index c18d01d26a..34246d1803 100644
--- a/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
+++ b/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
@@ -109,20 +109,20 @@ PREHOOK: Output: default@smb_table_part
POSTHOOK: query: INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table
POSTHOOK: type: QUERY
POSTHOOK: Input: default@data_table
-POSTHOOK: Output: default@smb_table_part@p1=100
-POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).key SIMPLE [(data_table)data_table.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).value SIMPLE [(data_table)data_table.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_table_part@p1=100.0
+POSTHOOK: Lineage: smb_table_part PARTITION(p1=100.0).key SIMPLE [(data_table)data_table.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_table_part PARTITION(p1=100.0).value SIMPLE [(data_table)data_table.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_table
PREHOOK: Input: default@smb_table_part
-PREHOOK: Input: default@smb_table_part@p1=100
+PREHOOK: Input: default@smb_table_part@p1=100.0
#### A masked pattern was here ####
POSTHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_table
POSTHOOK: Input: default@smb_table_part
-POSTHOOK: Input: default@smb_table_part@p1=100
+POSTHOOK: Input: default@smb_table_part@p1=100.0
#### A masked pattern was here ####
1 100.0
2 100.0
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
index 56d5ed945b..9efcf98dd8 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
@@ -63,21 +63,21 @@ STAGE PLANS:
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), CAST( VALUE._col2 AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
+ Map-reduce partition columns: _col2 (type: string)
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 3
Execution mode: vectorized
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
index 977c4cbfc1..5c95429418 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -152,7 +152,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -279,7 +279,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
index 4647b86ea3..089be29c53 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
@@ -50,20 +50,20 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 10) AS STRING) (type: string), CAST( (hash(value) pmod 10) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col2 (type: int), _col3 (type: int)
+ key expressions: _col2 (type: string), _col3 (type: string)
null sort order: aa
sort order: ++
- Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
+ Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
File Output Operator
compressed: false
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
index b7d3dd725d..d58642db70 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
@@ -71,20 +71,20 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
+ Map-reduce partition columns: _col2 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
@@ -168,20 +168,20 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
+ Map-reduce partition columns: _col2 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
@@ -310,20 +310,20 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col2 (type: int)
+ key expressions: _col2 (type: string)
null sort order: a
sort order: +
- Map-reduce partition columns: _col2 (type: int)
+ Map-reduce partition columns: _col2 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: int)
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
outputColumnNames: _col0, _col1, _col2
File Output Operator
compressed: false
diff --git a/serde/pom.xml b/serde/pom.xml
index 9f1b146d02..10fa1b7c1e 100644
--- a/serde/pom.xml
+++ b/serde/pom.xml
@@ -70,8 +70,8 @@
${hppc.version}
- com.vlkan
- flatbuffers
+ com.github.davidmoten
+ flatbuffers-java
${flatbuffers.version}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 317c9cbae9..3eff37f041 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -12856,11 +12856,15 @@ public void scheduledQueryUpdate(ScheduledQuery scheduledQuery)
openTransaction();
MScheduledQuery persisted = existing.get();
persisted.doUpdate(schq);
- Integer nextExecutionTime = computeNextExecutionTime(schq.getSchedule());
- if (nextExecutionTime == null) {
- throw new InvalidInputException("Invalid schedule: " + schq.getSchedule());
+ if (!scheduledQuery.isSetNextExecution()) {
+ Integer nextExecutionTime = computeNextExecutionTime(schq.getSchedule());
+ if (nextExecutionTime == null) {
+ throw new InvalidInputException("Invalid schedule: " + schq.getSchedule());
+ }
+ persisted.setNextExecution(nextExecutionTime);
+ } else {
+ persisted.setNextExecution(schq.getNextExecution());
}
- persisted.setNextExecution(nextExecutionTime);
pm.makePersistent(persisted);
commited = commitTransaction();
} finally {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java
deleted file mode 100644
index 3b4574f0db..0000000000
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains the HiveMetaTool program and the classes used by it.
- */
-package org.apache.hadoop.hive.metastore.tools.metatool;
-