diff --git packaging/pom.xml packaging/pom.xml
index cae1fa9822..b58338fc60 100644
--- packaging/pom.xml
+++ packaging/pom.xml
@@ -284,7 +284,12 @@
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-upgrade-acid</artifactId>
+      <artifactId>hive-pre-upgrade</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-post-upgrade</artifactId>
       <version>${project.version}</version>
diff --git upgrade-acid/pom.xml upgrade-acid/pom.xml
index 77cd24094d..b5443032fd 100644
--- upgrade-acid/pom.xml
+++ upgrade-acid/pom.xml
@@ -17,7 +17,10 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
+
org.apache
apache
18
@@ -29,7 +32,7 @@
   <version>4.0.0-SNAPSHOT</version>
   <artifactId>hive-upgrade-acid</artifactId>
   <name>Hive Upgrade Acid</name>
-  <packaging>jar</packaging>
+  <packaging>pom</packaging>
@@ -47,250 +50,10 @@
${basedir}/checkstyle/
2.17
2.20.1
-
-
- ${project.build.directory}/testconf
- file://
- ${project.basedir}/src/test/resources
- ${project.build.directory}/tmp
- ${project.build.directory}/warehouse
- file://
- 1
- true
-
-
-
- commons-cli
- commons-cli
- 1.2
- provided
-
-
- org.apache.hive
- hive-metastore
- 2.3.3
- provided
-
-
- org.apache.hive
- hive-exec
- 2.3.3
- provided
-
-
- org.apache.hadoop
- hadoop-common
- 2.7.2
- provided
-
-
-
- org.apache.hadoop
- hadoop-mapreduce-client-common
- 2.7.2
- provided
-
-
- org.apache.orc
- orc-core
- 1.3.3
- provided
-
-
-
-
-
-
-
- ${basedir}/src/main/resources
-
- package.jdo
-
-
-
+  <modules>
+    <module>pre-upgrade</module>
+    <module>post-upgrade</module>
+  </modules>
-
-
-
- org.apache.maven.plugins
- maven-antrun-plugin
- ${maven.antrun.plugin.version}
-
-
- ant-contrib
- ant-contrib
- ${ant.contrib.version}
-
-
- ant
- ant
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-checkstyle-plugin
- ${maven.checkstyle.plugin.version}
-
-
- org.codehaus.mojo
- exec-maven-plugin
- ${maven.exec.plugin.version}
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-antrun-plugin
-
-
- setup-test-dirs
- process-test-resources
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- setup-metastore-scripts
- process-test-resources
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-failsafe-plugin
- 2.20.1
-
-
-
- integration-test
- verify
-
-
-
-
- true
- false
- -Xmx2048m
- false
-
- true
- ${test.tmp.dir}
- ${test.tmp.dir}
- true
-
-
- ${log4j.conf.dir}
-
- ${skipITests}
-
-
-
- org.apache.maven.plugins
- maven-surefire-plugin
- ${maven.surefire.version}
-
- true
- false
- ${test.forkcount}
- -Xmx2048m
- false
-
- ${project.build.directory}
- true
- ${derby.version}
- ${test.tmp.dir}/derby.log
-
- ${test.log4j.scheme}${test.conf.dir}/hive-log4j2.properties
- true
- ${test.tmp.dir}
-
- jdbc:derby:${test.tmp.dir}/junit_metastore_db;create=true
- false
- ${test.tmp.dir}
- ${test.warehouse.scheme}${test.warehouse.dir}
-
-
-
- ${log4j.conf.dir}
- ${test.conf.dir}
-
- ${test.conf.dir}/conf
-
-
-
-
- org.apache.maven.plugins
- maven-jar-plugin
-
-
-
- test-jar
-
-
-
-
-
-
\ No newline at end of file
diff --git upgrade-acid/post-upgrade/pom.xml upgrade-acid/post-upgrade/pom.xml
new file mode 100644
index 0000000000..c0a901a4a5
--- /dev/null
+++ upgrade-acid/post-upgrade/pom.xml
@@ -0,0 +1,289 @@
+
+
+
+
+
+ org.apache.hive
+ hive-upgrade-acid
+ 4.0.0-SNAPSHOT
+ ../pom.xml
+
+
+
+ 4.0.0
+ 4.0.0-SNAPSHOT
+ hive-post-upgrade
+ Hive Post Upgrade Acid
+ jar
+
+
+
+ 3.1.0
+ 1.2
+ 1.5.0
+
+ ../..
+
+
+ ${project.build.directory}/testconf
+ file://
+ ${project.basedir}/src/test/resources
+ ${project.build.directory}/tmp
+ ${project.build.directory}/warehouse
+ file://
+ 1
+ true
+
+
+
+
+ commons-cli
+ commons-cli
+ ${commons-cli.version}
+ provided
+
+
+ org.apache.hive
+ hive-metastore
+ ${project.version}
+ provided
+
+
+ org.apache.hive
+ hive-exec
+ ${project.version}
+ provided
+
+
+ org.apache.hadoop
+ hadoop-common
+ ${hadoop.version}
+ provided
+
+
+
+ org.apache.hadoop
+ hadoop-mapreduce-client-common
+ ${hadoop.version}
+ provided
+
+
+ org.apache.orc
+ orc-core
+ ${orc.version}
+ provided
+
+
+
+
+
+
+
+ ${basedir}/src/main/resources
+
+ package.jdo
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+ ${maven.antrun.plugin.version}
+
+
+ ant-contrib
+ ant-contrib
+ ${ant.contrib.version}
+
+
+ ant
+ ant
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+ ${maven.checkstyle.plugin.version}
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ ${maven.exec.plugin.version}
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ setup-test-dirs
+ process-test-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ setup-metastore-scripts
+ process-test-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-failsafe-plugin
+ 2.20.1
+
+
+
+ integration-test
+ verify
+
+
+
+
+ true
+ false
+ -Xmx2048m
+ false
+
+ true
+ ${test.tmp.dir}
+ ${test.tmp.dir}
+ true
+
+
+ ${log4j.conf.dir}
+
+ ${skipITests}
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+ ${maven.surefire.version}
+
+ true
+ false
+ ${test.forkcount}
+ -Xmx2048m
+ false
+
+ ${project.build.directory}
+ true
+ ${derby.version}
+ ${test.tmp.dir}/derby.log
+
+ ${test.log4j.scheme}${test.conf.dir}/hive-log4j2.properties
+ true
+ ${test.tmp.dir}
+
+ jdbc:derby:${test.tmp.dir}/junit_metastore_db;create=true
+ false
+ ${test.tmp.dir}
+ ${test.warehouse.scheme}${test.warehouse.dir}
+
+
+
+ ${log4j.conf.dir}
+ ${test.conf.dir}
+
+ ${test.conf.dir}/conf
+
+
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+
+
+
+ test-jar
+
+
+
+
+
+
+
\ No newline at end of file
diff --git upgrade-acid/post-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java upgrade-acid/post-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java
new file mode 100644
index 0000000000..d5b9a6668a
--- /dev/null
+++ upgrade-acid/post-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.upgrade.acid;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hive.common.util.HiveVersionInfo;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+
+/**
+ * This utility is designed to help with upgrading to Hive 3.0. The on-disk layout for
+ * transactional tables has changed in 3.0 and requires pre-processing before the upgrade to
+ * ensure they are readable by Hive 3.0. Some transactional tables (identified by this utility)
+ * require Major compaction to be run on them before upgrading to 3.0. Once this compaction
+ * starts, no more update/delete/merge statements may be executed on these tables until the
+ * upgrade is finished.
+ *
+ * Additionally, a new type of transactional table was added in 3.0 - insert-only tables. These
+ * tables support ACID semantics and work with any Input/OutputFormat. Any managed table may
+ * be made an insert-only transactional table. These tables don't support Update/Delete/Merge commands.
+ *
+ * This utility works in 2 modes: preUpgrade and postUpgrade.
+ * In preUpgrade mode it has to have 2.x Hive jars on the classpath. It will perform analysis on
+ * existing transactional tables, determine which require compaction and generate a set of SQL
+ * commands to launch all of these compactions.
+ *
+ * Note that, depending on the number of tables/partitions and the amount of data in them,
+ * compactions may take a significant amount of time and resources. The script output by this
+ * utility includes some heuristics that may help estimate the time required. If no script is
+ * produced, no action is needed. For compactions to run, an instance of standalone Hive
+ * Metastore must be running. Please make sure hive.compactor.worker.threads is sufficiently
+ * high - this specifies the limit of concurrent compactions that may be run. Each compaction
+ * job is a Map-Reduce job. hive.compactor.job.queue may be used to set a Yarn queue name where
+ * all compaction jobs will be submitted.
+ *
+ * In postUpgrade mode, Hive 3.0 jars/hive-site.xml should be on the classpath. This utility will
+ * find all the tables that may be made transactional (with full CRUD support) and generate
+ * Alter Table commands to do so. It will also find all tables that may not support full CRUD
+ * but can be made insert-only transactional tables, and generate the corresponding Alter Table commands.
+ *
+ * TODO: rename files
+ *
+ * "execute" option may be supplied in both modes to have the utility automatically execute the
+ * equivalent of the generated commands
+ *
+ * "location" option may be supplied followed by a path to set the location for the generated
+ * scripts.
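+ *
+ * A minimal usage sketch (the output directory below is hypothetical; the unit test drives the
+ * tool the same way):
+ *   String[] args = {"-location", "/tmp/upgrade-scripts", "-execute"};
+ *   UpgradeTool.main(args);
+ * Without "execute" the tool only writes convertToAcid_*.sql and convertToMM_*.sql scripts,
+ * containing statements such as
+ *   ALTER TABLE default.tflat SET TBLPROPERTIES ('transactional'='true');
+ *   ALTER TABLE default.tflattext SET TBLPROPERTIES
+ *     ('transactional'='true', 'transactional_properties'='insert_only');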
+ */
+public class UpgradeTool {
+ private static final Logger LOG = LoggerFactory.getLogger(UpgradeTool.class);
+ private static final int PARTITION_BATCH_SIZE = 10000;
+ private final Options cmdLineOptions = new Options();
+
+ public static void main(String[] args) throws Exception {
+ UpgradeTool tool = new UpgradeTool();
+ tool.init();
+ CommandLineParser parser = new GnuParser();
+ CommandLine line ;
+ String outputDir = ".";
+ boolean execute = false;
+ try {
+ line = parser.parse(tool.cmdLineOptions, args);
+ } catch (ParseException e) {
+ System.err.println("UpgradeTool: Parsing failed. Reason: " + e.getLocalizedMessage());
+ printAndExit(tool);
+ return;
+ }
+ if (line.hasOption("help")) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("upgrade-acid", tool.cmdLineOptions);
+ return;
+ }
+ if(line.hasOption("location")) {
+ outputDir = line.getOptionValue("location");
+ }
+ if(line.hasOption("execute")) {
+ execute = true;
+ }
+ LOG.info("Starting with execute=" + execute + ", location=" + outputDir);
+
+ try {
+ String hiveVer = HiveVersionInfo.getShortVersion();
+ if(!hiveVer.startsWith("3.")) {
+ throw new IllegalStateException("postUpgrade w/execute requires Hive 3.x. Actual: " +
+ hiveVer);
+ }
+ tool.performUpgradeInternal(outputDir, execute);
+ }
+ catch(Exception ex) {
+ LOG.error("UpgradeTool failed", ex);
+ throw ex;
+ }
+ }
+ private static void printAndExit(UpgradeTool tool) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("upgrade-acid", tool.cmdLineOptions);
+ System.exit(1);
+ }
+
+ private void init() {
+ try {
+ cmdLineOptions.addOption(new Option("help", "Generates a script to execute on 3.x " +
+ "cluster. This requires 3.x binaries on the classpath and hive-site.xml."));
+ Option exec = new Option("execute",
+ "Executes commands equivalent to generated scrips");
+ exec.setOptionalArg(true);
+ cmdLineOptions.addOption(exec);
+ cmdLineOptions.addOption(new Option("location", true,
+ "Location to write scripts to. Default is CWD."));
+ }
+ catch(Exception ex) {
+ LOG.error("init()", ex);
+ throw ex;
+ }
+ }
+ /**
+ * todo: this should accept a file of table names to exclude from non-acid to acid conversion
+ * todo: change script comments to a preamble instead of a footer
+ *
+ * How does the rename script work? "hadoop fs -mv oldname newname" - and what about S3?
+ * How does this actually get executed?
+ * All other actions are done via embedded JDBC.
+ */
+ private void performUpgradeInternal(String scriptLocation, boolean execute)
+ throws HiveException, TException, IOException {
+ HiveConf conf = hiveConf != null ? hiveConf : new HiveConf();
+ boolean isAcidEnabled = isAcidEnabled(conf);
+ HiveMetaStoreClient hms = new HiveMetaStoreClient(conf);//MetaException
+ LOG.debug("Looking for databases");
+ List<String> databases = hms.getAllDatabases();//TException
+ LOG.debug("Found " + databases.size() + " databases to process");
+ List<String> convertToAcid = new ArrayList<>();
+ List<String> convertToMM = new ArrayList<>();
+ Hive db = null;
+ if(execute) {
+ db = Hive.get(conf);
+ }
+
+ for(String dbName : databases) {
+ List<String> tables = hms.getAllTables(dbName);
+ LOG.debug("found " + tables.size() + " tables in " + dbName);
+ for(String tableName : tables) {
+ Table t = hms.getTable(dbName, tableName);
+ LOG.debug("processing table " + Warehouse.getQualifiedName(t));
+ if(isAcidEnabled) {
+ //if acid is off post upgrade, you can't make any tables acid - will throw
+ processConversion(t, convertToAcid, convertToMM, hms, db, execute);
+ }
+ /*todo: handle renaming files somewhere*/
+ }
+ }
+ makeConvertTableScript(convertToAcid, convertToMM, scriptLocation);
+ makeRenameFileScript(scriptLocation);//todo: is this pre or post upgrade?
+ //todo: can different tables be in different FileSystems?
+ }
+
+ /**
+ * Actually makes the table transactional
+ */
+ private static void alterTable(Table t, Hive db, boolean isMM)
+ throws HiveException, InvalidOperationException {
+ org.apache.hadoop.hive.ql.metadata.Table metaTable =
+ //clone to make sure new prop doesn't leak
+ new org.apache.hadoop.hive.ql.metadata.Table(t.deepCopy());
+ metaTable.getParameters().put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
+ if(isMM) {
+ metaTable.getParameters()
+ .put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
+ }
+ db.alterTable(Warehouse.getQualifiedName(t), metaTable, false, null);
+ }
+
+ /**
+ * todo: handle exclusion list
+ * Figures out which tables to make Acid or MM and (optionally) performs the operation.
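+ * A non-partitioned managed table whose storage uses an Acid-capable Input/OutputFormat and has
+ * no sort columns is made full CRUD acid; otherwise it becomes insert-only (MM). For partitioned
+ * tables every partition's StorageDescriptor is checked (in batches) before deciding.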
+ */
+ private static void processConversion(Table t, List<String> convertToAcid,
+ List<String> convertToMM, HiveMetaStoreClient hms, Hive db, boolean execute)
+ throws TException, HiveException {
+ if(isFullAcidTable(t)) {
+ return;
+ }
+ if(!TableType.MANAGED_TABLE.name().equalsIgnoreCase(t.getTableType())) {
+ return;
+ }
+ String fullTableName = Warehouse.getQualifiedName(t);
+ if(t.getPartitionKeysSize() <= 0) {
+ if(canBeMadeAcid(fullTableName, t.getSd())) {
+ convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
+ "'transactional'='true')");
+ if(execute) {
+ alterTable(t, db, false);
+ }
+ }
+ else {
+ convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
+ "'transactional'='true', 'transactional_properties'='insert_only')");
+ if(execute) {
+ alterTable(t, db, true);
+ }
+ }
+ }
+ else {
+ /*
+ each Partition may have different I/O Format so have to check them all before deciding to
+ make a full CRUD table.
+ Run in batches to prevent OOM
+ */
+ List<String> partNames = hms.listPartitionNames(t.getDbName(), t.getTableName(), (short)-1);
+ int batchSize = PARTITION_BATCH_SIZE;
+ int numWholeBatches = partNames.size()/batchSize;
+ for(int i = 0; i < numWholeBatches; i++) {
+ List<Partition> partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
+ partNames.subList(i * batchSize, (i + 1) * batchSize));
+ if(alterTable(fullTableName, partitionList, convertToMM, t, db, execute)) {
+ return;
+ }
+ }
+ if(numWholeBatches * batchSize < partNames.size()) {
+ //last partial batch
+ List<Partition> partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
+ partNames.subList(numWholeBatches * batchSize, partNames.size()));
+ if(alterTable(fullTableName, partitionList, convertToMM, t, db, execute)) {
+ return;
+ }
+ }
+ //if we get here, all partitions were checked and are Acid compatible - make the table full acid
+ convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
+ "'transactional'='true')");
+ if(execute) {
+ alterTable(t, db, false);
+ }
+ }
+ }
+ /**
+ * @return true if table was converted/command generated
+ */
+ private static boolean alterTable(String fullTableName, List<Partition> partitionList,
+ List<String> convertToMM, Table t, Hive db, boolean execute)
+ throws InvalidOperationException, HiveException {
+ for(Partition p : partitionList) {
+ if(!canBeMadeAcid(fullTableName, p.getSd())) {
+ convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
+ "'transactional'='true', 'transactional_properties'='insert_only')");
+ if(execute) {
+ alterTable(t, db, true);
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+ private static boolean canBeMadeAcid(String fullTableName, StorageDescriptor sd) {
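+ //full CRUD requires an Acid-capable Input/OutputFormat (Orc today) and no SORTED BY columns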
+ return isAcidInputOutputFormat(fullTableName, sd) && sd.getSortColsSize() <= 0;
+ }
+ private static boolean isAcidInputOutputFormat(String fullTableName, StorageDescriptor sd) {
+ try {
+ Class inputFormatClass = sd.getInputFormat() == null ? null :
+ Class.forName(sd.getInputFormat());
+ Class outputFormatClass = sd.getOutputFormat() == null ? null :
+ Class.forName(sd.getOutputFormat());
+
+ if (inputFormatClass != null && outputFormatClass != null &&
+ Class.forName("org.apache.hadoop.hive.ql.io.AcidInputFormat")
+ .isAssignableFrom(inputFormatClass) &&
+ Class.forName("org.apache.hadoop.hive.ql.io.AcidOutputFormat")
+ .isAssignableFrom(outputFormatClass)) {
+ return true;
+ }
+ } catch (ClassNotFoundException e) {
+ //if a table is using some custom I/O format and it's not in the classpath, we won't mark
+ //the table for Acid, but today (Hive 3.1 and earlier) OrcInput/OutputFormat is the only
+ //Acid format
+ LOG.error("Could not determine if " + fullTableName +
+ " can be made Acid due to: " + e.getMessage(), e);
+ return false;
+ }
+ return false;
+ }
+ private static void makeConvertTableScript(List<String> alterTableAcid, List<String> alterTableMm,
+ String scriptLocation) throws IOException {
+ if (alterTableAcid.isEmpty()) {
+ LOG.info("No acid conversion is necessary");
+ } else {
+ String fileName = "convertToAcid_" + System.currentTimeMillis() + ".sql";
+ LOG.debug("Writing CRUD conversion commands to " + fileName);
+ try(PrintWriter pw = createScript(alterTableAcid, fileName, scriptLocation)) {
+ //todo: fix this - it has to run in 3.0 since tables may be unbucketed
+ pw.println("-- These commands may be executed by Hive 1.x later");
+ }
+ }
+
+ if (alterTableMm.isEmpty()) {
+ LOG.info("No managed table conversion is necessary");
+ } else {
+ String fileName = "convertToMM_" + System.currentTimeMillis() + ".sql";
+ LOG.debug("Writing managed table conversion commands to " + fileName);
+ try(PrintWriter pw = createScript(alterTableMm, fileName, scriptLocation)) {
+ pw.println("-- These commands must be executed by Hive 3.0 or later");
+ }
+ }
+ }
+
+ private static PrintWriter createScript(List<String> commands, String fileName,
+ String scriptLocation) throws IOException {
+ FileWriter fw = new FileWriter(scriptLocation + "/" + fileName);
+ PrintWriter pw = new PrintWriter(fw);
+ for(String cmd : commands) {
+ pw.println(cmd + ";");
+ }
+ return pw;
+ }
+ private static void makeRenameFileScript(String scriptLocation) throws IOException {
+ List<String> commands = Collections.emptyList();
+ if (commands.isEmpty()) {
+ LOG.info("No file renaming is necessary");
+ } else {
+ String fileName = "normalizeFileNames_" + System.currentTimeMillis() + ".sh";
+ LOG.debug("Writing file renaming commands to " + fileName);
+ PrintWriter pw = createScript(commands, fileName, scriptLocation);
+ pw.close();
+ }
+ }
+ private static boolean isFullAcidTable(Table t) {
+ if (t.getParametersSize() <= 0) {
+ //cannot be acid
+ return false;
+ }
+ String transactionalValue = t.getParameters()
+ .get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+ if (transactionalValue != null && "true".equalsIgnoreCase(transactionalValue)) {
+ System.out.println("Found Acid table: " + Warehouse.getQualifiedName(t));
+ return true;
+ }
+ return false;
+ }
+ private static boolean isAcidEnabled(HiveConf hiveConf) {
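+ //i.e. hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager and
+ //hive.support.concurrency=true in hive-site.xml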
+ String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
+ boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
+ String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
+ return txnMgr.equals(dbTxnMgr) && concurrency;
+ }
+ /**
+ * Can be set from tests when the config needs something other than the default values,
+ * for example to enable acid.
+ */
+ @VisibleForTesting
+ static HiveConf hiveConf = null;
+}
diff --git upgrade-acid/post-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java upgrade-acid/post-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java
new file mode 100644
index 0000000000..a945ad9a7d
--- /dev/null
+++ upgrade-acid/post-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.upgrade.acid;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.HiveInputFormat;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestUpgradeTool {
+ private static final Logger LOG = LoggerFactory.getLogger(TestUpgradeTool.class);
+ private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") +
+ File.separator + TestUpgradeTool.class.getCanonicalName() + "-" + System.currentTimeMillis()
+ ).getPath().replaceAll("\\\\", "/");
+
+ private String getTestDataDir() {
+ return TEST_DATA_DIR;
+ }
+
+ /**
+ * includes 'execute' for postUpgrade
+ */
+ @Test
+ public void testPostUpgrade() throws Exception {
+ int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}};
+ hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "dynamic");
+ runStatementOnDriver("drop table if exists TAcid");
+ runStatementOnDriver("drop table if exists TAcidPart");
+ runStatementOnDriver("drop table if exists TFlat");
+ runStatementOnDriver("drop table if exists TFlatText");
+
+ //should be converted to Acid
+ runStatementOnDriver("create table TAcid (a int, b int) clustered by (b) into 2 buckets" +
+ " stored as orc TBLPROPERTIES ('transactional'='false')");
+ //should be converted to Acid
+ runStatementOnDriver("create table TAcidPart (a int, b int) partitioned by (p int)" +
+ " clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')");
+ //to create some partitions
+ runStatementOnDriver("insert into TAcidPart partition(p)" + makeValuesClause(dataPart));
+
+ //should be converted to Acid
+ runStatementOnDriver("create table TFlat (a int, b int) stored as orc " +
+ "tblproperties('transactional'='false')");
+ //should be converted to MM
+ runStatementOnDriver("create table TFlatText (a int, b int) stored as textfile " +
+ "tblproperties('transactional'='false')");
+
+ Hive db = Hive.get(hiveConf);
+ Table tacid = db.getTable("default", "tacid");
+ Assert.assertEquals("Expected TAcid to not be full acid", false,
+ AcidUtils.isFullAcidTable(tacid));
+ Table tacidpart = db.getTable("default", "tacidpart");
+ Assert.assertEquals("Expected TAcidPart to not be full acid", false,
+ AcidUtils.isFullAcidTable(tacidpart));
+
+ Table t = db.getTable("default", "tflat");
+ Assert.assertEquals("Expected TAcid to not be full acid", false,
+ AcidUtils.isFullAcidTable(t));
+ t = db.getTable("default", "tflattext");
+ Assert.assertEquals("Expected TAcidPart to not be full acid", false,
+ AcidUtils.isInsertOnlyTable(tacidpart));
+
+
+ String[] args2 = {"-location", getTestDataDir(), "-execute"};
+ UpgradeTool.hiveConf = hiveConf;
+ UpgradeTool.main(args2);
+
+ tacid = db.getTable("default", "tacid");
+ Assert.assertEquals("Expected TAcid to become full acid", true,
+ AcidUtils.isFullAcidTable(tacid));
+ tacidpart = db.getTable("default", "tacidpart");
+ Assert.assertEquals("Expected TAcidPart to become full acid", true,
+ AcidUtils.isFullAcidTable(tacidpart));
+
+ t = db.getTable("default", "tflat");
+ Assert.assertEquals("Expected TAcid to become acid", true, AcidUtils.isFullAcidTable(t));
+ t = db.getTable("default", "tflattext");
+ Assert.assertEquals("Expected TAcidPart to become MM", true,
+ AcidUtils.isInsertOnlyTable(t));
+ }
+
+ private static String makeValuesClause(int[][] rows) {
+ assert rows.length > 0;
+ StringBuilder sb = new StringBuilder(" values");
+ for(int[] row : rows) {
+ assert row.length > 0;
+ if(row.length > 1) {
+ sb.append("(");
+ }
+ for(int value : row) {
+ sb.append(value).append(",");
+ }
+ sb.setLength(sb.length() - 1);//remove trailing comma
+ if(row.length > 1) {
+ sb.append(")");
+ }
+ sb.append(",");
+ }
+ sb.setLength(sb.length() - 1);//remove trailing comma
+ return sb.toString();
+ }
+
+ private List<String> runStatementOnDriver(String stmt) throws Exception {
+ CommandProcessorResponse cpr = d.run(stmt);
+ if(cpr.getResponseCode() != 0) {
+ throw new RuntimeException(stmt + " failed: " + cpr);
+ }
+ List<String> rs = new ArrayList<>();
+ d.getResults(rs);
+ return rs;
+ }
+ @Before
+ public void setUp() throws Exception {
+ setUpInternal();
+ }
+ private void initHiveConf() {
+ hiveConf = new HiveConf(this.getClass());
+ }
+ @Rule
+ public TestName testName = new TestName();
+ private HiveConf hiveConf;
+ private Driver d;
+ private void setUpInternal() throws Exception {
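+ //configures a local metastore, prepares the ACID transaction tables (TxnDbUtil) and creates
+ //a Driver so the test can run HiveQL statements directly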
+ initHiveConf();
+ TxnDbUtil.cleanDb(hiveConf);
+ FileUtils.deleteDirectory(new File(getTestDataDir()));
+
+ Path workDir = new Path(System.getProperty("test.tmp.dir",
+ "target" + File.separator + "test" + File.separator + "tmp"));
+ hiveConf.set("mapred.local.dir", workDir + File.separator + this.getClass().getSimpleName()
+ + File.separator + "mapred" + File.separator + "local");
+ hiveConf.set("mapred.system.dir", workDir + File.separator + this.getClass().getSimpleName()
+ + File.separator + "mapred" + File.separator + "system");
+ hiveConf.set("mapreduce.jobtracker.staging.root.dir", workDir + File.separator +
+ this.getClass().getSimpleName()
+ + File.separator + "mapred" + File.separator + "staging");
+ hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName()
+ + File.separator + "mapred" + File.separator + "temp");
+ hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+ hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+ hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir());
+ hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
+ hiveConf
+ .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
+ "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd." +
+ "SQLStdHiveAuthorizerFactory");
+ hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
+ hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
+ TxnDbUtil.setConfValues(hiveConf);
+ TxnDbUtil.prepDb(hiveConf);
+ File f = new File(getWarehouseDir());
+ if (f.exists()) {
+ FileUtil.fullyDelete(f);
+ }
+ if (!(new File(getWarehouseDir()).mkdirs())) {
+ throw new RuntimeException("Could not create " + getWarehouseDir());
+ }
+ SessionState ss = SessionState.start(hiveConf);
+ ss.applyAuthorizationPolicy();
+ d = new Driver(new QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build(), null);
+ d.setMaxRows(10000);
+ }
+ private String getWarehouseDir() {
+ return getTestDataDir() + "/warehouse";
+ }
+ @After
+ public void tearDown() throws Exception {
+ if (d != null) {
+ d.close();
+ d.destroy();
+ d = null;
+ }
+ }
+
+}
diff --git upgrade-acid/pre-upgrade/pom.xml upgrade-acid/pre-upgrade/pom.xml
new file mode 100644
index 0000000000..b5ef824ef3
--- /dev/null
+++ upgrade-acid/pre-upgrade/pom.xml
@@ -0,0 +1,282 @@
+
+
+
+
+
+ org.apache.hive
+ hive-upgrade-acid
+ 4.0.0-SNAPSHOT
+ ../pom.xml
+
+
+
+ 4.0.0
+
+ 4.0.0-SNAPSHOT
+ hive-pre-upgrade
+ Hive Pre Upgrade Acid
+ jar
+
+
+ ../..
+
+
+ ${project.build.directory}/testconf
+ file://
+ ${project.basedir}/src/test/resources
+ ${project.build.directory}/tmp
+ ${project.build.directory}/warehouse
+ file://
+ 1
+ true
+
+
+
+
+ commons-cli
+ commons-cli
+ 1.2
+ provided
+
+
+ org.apache.hive
+ hive-metastore
+ 2.3.3
+ provided
+
+
+ org.apache.hive
+ hive-exec
+ 2.3.3
+ provided
+
+
+ org.apache.hadoop
+ hadoop-common
+ 2.7.2
+ provided
+
+
+
+ org.apache.hadoop
+ hadoop-mapreduce-client-common
+ 2.7.2
+ provided
+
+
+ org.apache.orc
+ orc-core
+ 1.3.3
+ provided
+
+
+
+
+
+
+
+ ${basedir}/src/main/resources
+
+ package.jdo
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+ ${maven.antrun.plugin.version}
+
+
+ ant-contrib
+ ant-contrib
+ ${ant.contrib.version}
+
+
+ ant
+ ant
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+ ${maven.checkstyle.plugin.version}
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ ${maven.exec.plugin.version}
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ setup-test-dirs
+ process-test-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ setup-metastore-scripts
+ process-test-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-failsafe-plugin
+ 2.20.1
+
+
+
+ integration-test
+ verify
+
+
+
+
+ true
+ false
+ -Xmx2048m
+ false
+
+ true
+ ${test.tmp.dir}
+ ${test.tmp.dir}
+ true
+
+
+ ${log4j.conf.dir}
+
+ ${skipITests}
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+ ${maven.surefire.version}
+
+ true
+ false
+ ${test.forkcount}
+ -Xmx2048m
+ false
+
+ ${project.build.directory}
+ true
+ ${derby.version}
+ ${test.tmp.dir}/derby.log
+
+ ${test.log4j.scheme}${test.conf.dir}/hive-log4j2.properties
+ true
+ ${test.tmp.dir}
+
+ jdbc:derby:${test.tmp.dir}/junit_metastore_db;create=true
+ false
+ ${test.tmp.dir}
+ ${test.warehouse.scheme}${test.warehouse.dir}
+
+
+
+ ${log4j.conf.dir}
+ ${test.conf.dir}
+
+ ${test.conf.dir}/conf
+
+
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+
+
+
+ test-jar
+
+
+
+
+
+
+
\ No newline at end of file
diff --git upgrade-acid/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java
similarity index 75%
rename from upgrade-acid/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java
rename to upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java
index 78c084392d..f3562c7d72 100644
--- upgrade-acid/src/main/java/org/apache/hadoop/hive/upgrade/acid/UpgradeTool.java
+++ upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java
@@ -33,7 +33,6 @@
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -41,7 +40,6 @@
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
@@ -109,22 +107,22 @@
* "location" option may be supplied followed by a path to set the location for the generated
* scripts.
*/
-public class UpgradeTool {
- private static final Logger LOG = LoggerFactory.getLogger(UpgradeTool.class);
+public class PreUpgradeTool {
+ private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeTool.class);
private static final int PARTITION_BATCH_SIZE = 10000;
private final Options cmdLineOptions = new Options();
public static void main(String[] args) throws Exception {
- UpgradeTool tool = new UpgradeTool();
+ PreUpgradeTool tool = new PreUpgradeTool();
tool.init();
CommandLineParser parser = new GnuParser();
CommandLine line ;
String outputDir = ".";
- boolean preUpgrade = false, postUpgrade = false, execute = false, nonBlocking = false;
+ boolean execute = false;
try {
line = parser.parse(tool.cmdLineOptions, args);
} catch (ParseException e) {
- System.err.println("UpgradeTool: Parsing failed. Reason: " + e.getLocalizedMessage());
+ System.err.println("PreUpgradeTool: Parsing failed. Reason: " + e.getLocalizedMessage());
printAndExit(tool);
return;
}
@@ -139,39 +137,21 @@ public static void main(String[] args) throws Exception {
if(line.hasOption("execute")) {
execute = true;
}
- if(line.hasOption("preUpgrade")) {
- preUpgrade = true;
- }
- if(line.hasOption("postUpgrade")) {
- postUpgrade = true;
- }
- LOG.info("Starting with preUpgrade=" + preUpgrade + ", postUpgrade=" + postUpgrade +
- ", execute=" + execute + ", location=" + outputDir);
- if(preUpgrade && postUpgrade) {
- throw new IllegalArgumentException("Cannot specify both preUpgrade and postUpgrade");
- }
+ LOG.info("Starting with execute=" + execute + ", location=" + outputDir);
try {
String hiveVer = HiveVersionInfo.getShortVersion();
- if(preUpgrade) {
- if(!hiveVer.startsWith("2.")) {
- throw new IllegalStateException("preUpgrade requires Hive 2.x. Actual: " + hiveVer);
- }
+ if(!hiveVer.startsWith("2.")) {
+ throw new IllegalStateException("preUpgrade requires Hive 2.x. Actual: " + hiveVer);
}
- if(postUpgrade && execute && !isTestMode) {
- if(!hiveVer.startsWith("3.")) {
- throw new IllegalStateException("postUpgrade w/execute requires Hive 3.x. Actual: " +
- hiveVer);
- }
- }
- tool.prepareAcidUpgradeInternal(outputDir, preUpgrade, postUpgrade, execute);
+ tool.prepareAcidUpgradeInternal(outputDir, execute);
}
catch(Exception ex) {
- LOG.error("UpgradeTool failed", ex);
+ LOG.error("PreUpgradeTool failed", ex);
throw ex;
}
}
- private static void printAndExit(UpgradeTool tool) {
+ private static void printAndExit(PreUpgradeTool tool) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("upgrade-acid", tool.cmdLineOptions);
System.exit(1);
@@ -179,13 +159,8 @@ private static void printAndExit(UpgradeTool tool) {
private void init() {
try {
- cmdLineOptions.addOption(new Option("help", "print this message"));
- cmdLineOptions.addOption(new Option("preUpgrade",
- "Generates a script to execute on 2.x cluster. This requires 2.x binaries" +
- " on the classpath and hive-site.xml."));
- cmdLineOptions.addOption(new Option("postUpgrade",
- "Generates a script to execute on 3.x cluster. This requires 3.x binaries" +
- " on the classpath and hive-site.xml."));
+ cmdLineOptions.addOption(new Option("help", "Generates a script to execute on 2.x" +
+ " cluster. This requires 2.x binaries on the classpath and hive-site.xml."));
Option exec = new Option("execute",
"Executes commands equivalent to generated scrips");
exec.setOptionalArg(true);
@@ -208,8 +183,8 @@ private void init() {
*
*
*/
- private void prepareAcidUpgradeInternal(String scriptLocation, boolean preUpgrade,
- boolean postUpgrade, boolean execute) throws HiveException, TException, IOException {
+ private void prepareAcidUpgradeInternal(String scriptLocation, boolean execute)
+ throws HiveException, TException, IOException {
HiveConf conf = hiveConf != null ? hiveConf : new HiveConf();
boolean isAcidEnabled = isAcidEnabled(conf);
HiveMetaStoreClient hms = new HiveMetaStoreClient(conf);//MetaException
@@ -232,9 +207,9 @@ private void prepareAcidUpgradeInternal(String scriptLocation, boolean preUpgrad
for(String tableName : tables) {
Table t = hms.getTable(dbName, tableName);
LOG.debug("processing table " + Warehouse.getQualifiedName(t));
- if(preUpgrade && isAcidEnabled) {
+ if(isAcidEnabled) {
//if acid is off, there can't be any acid tables - nothing to compact
- if(execute && txns == null) {
+ if(txns == null) {
/*
This API changed from 2.x to 3.0. so this won't even compile with 3.0
but it doesn't need to since we only run this preUpgrade
@@ -246,18 +221,12 @@ private void prepareAcidUpgradeInternal(String scriptLocation, boolean preUpgrad
getCompactionCommands(t, conf, hms, compactionMetaInfo, execute, db, txns);
compactions.addAll(compactionCommands);
}
- if(postUpgrade && isAcidEnabled) {
- //if acid is off post upgrade, you can't make any tables acid - will throw
- processConversion(t, convertToAcid, convertToMM, hms, db, execute);
- }
/*todo: handle renaming files somewhere*/
}
}
makeCompactionScript(compactions, scriptLocation, compactionMetaInfo);
- makeConvertTableScript(convertToAcid, convertToMM, scriptLocation);
- makeRenameFileScript(scriptLocation);//todo: is this pre or post upgrade?
- //todo: can different tables be in different FileSystems?
- if(preUpgrade && execute) {
+
+ if(execute) {
while(compactionMetaInfo.compactionIds.size() > 0) {
LOG.debug("Will wait for " + compactionMetaInfo.compactionIds.size() +
" compactions to complete");
@@ -321,113 +290,6 @@ private static void alterTable(Table t, Hive db, boolean isMM)
db.alterTable(Warehouse.getQualifiedName(t), metaTable, false, null);
}
- /**
- * todo: handle exclusion list
- * Figures out which tables to make Acid, MM and (optionally, performs the operation)
- */
- private static void processConversion(Table t, List convertToAcid,
- List convertToMM, HiveMetaStoreClient hms, Hive db, boolean execute)
- throws TException, HiveException {
- if(isFullAcidTable(t)) {
- return;
- }
- if(!TableType.MANAGED_TABLE.name().equalsIgnoreCase(t.getTableType())) {
- return;
- }
- String fullTableName = Warehouse.getQualifiedName(t);
- if(t.getPartitionKeysSize() <= 0) {
- if(canBeMadeAcid(fullTableName, t.getSd())) {
- convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
- "'transactional'='true')");
- if(execute) {
- alterTable(t, db, false);
- }
- }
- else {
- convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
- "'transactional'='true', 'transactional_properties'='insert_only')");
- if(execute) {
- alterTable(t, db, true);
- }
- }
- }
- else {
- /*
- each Partition may have different I/O Format so have to check them all before deciding to
- make a full CRUD table.
- Run in batches to prevent OOM
- */
- List partNames = hms.listPartitionNames(t.getDbName(), t.getTableName(), (short)-1);
- int batchSize = PARTITION_BATCH_SIZE;
- int numWholeBatches = partNames.size()/batchSize;
- for(int i = 0; i < numWholeBatches; i++) {
- List partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
- partNames.subList(i * batchSize, (i + 1) * batchSize));
- if(alterTable(fullTableName, partitionList, convertToMM, t, db, execute)) {
- return;
- }
- }
- if(numWholeBatches * batchSize < partNames.size()) {
- //last partial batch
- List partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
- partNames.subList(numWholeBatches * batchSize, partNames.size()));
- if(alterTable(fullTableName, partitionList, convertToMM, t, db, execute)) {
- return;
- }
- }
- //if here checked all parts and they are Acid compatible - make it acid
- convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
- "'transactional'='true')");
- if(execute) {
- alterTable(t, db, false);
- }
- }
- }
- /**
- * @return true if table was converted/command generated
- */
- private static boolean alterTable(String fullTableName, List partitionList,
- List convertToMM, Table t, Hive db, boolean execute)
- throws InvalidOperationException, HiveException {
- for(Partition p : partitionList) {
- if(!canBeMadeAcid(fullTableName, p.getSd())) {
- convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" +
- "'transactional'='true', 'transactional_properties'='insert_only')");
- if(execute) {
- alterTable(t, db, true);
- }
- return true;
- }
- }
- return false;
- }
- private static boolean canBeMadeAcid(String fullTableName, StorageDescriptor sd) {
- return isAcidInputOutputFormat(fullTableName, sd) && sd.getSortColsSize() <= 0;
- }
- private static boolean isAcidInputOutputFormat(String fullTableName, StorageDescriptor sd) {
- try {
- Class inputFormatClass = sd.getInputFormat() == null ? null :
- Class.forName(sd.getInputFormat());
- Class outputFormatClass = sd.getOutputFormat() == null ? null :
- Class.forName(sd.getOutputFormat());
-
- if (inputFormatClass != null && outputFormatClass != null &&
- Class.forName("org.apache.hadoop.hive.ql.io.AcidInputFormat")
- .isAssignableFrom(inputFormatClass) &&
- Class.forName("org.apache.hadoop.hive.ql.io.AcidOutputFormat")
- .isAssignableFrom(outputFormatClass)) {
- return true;
- }
- } catch (ClassNotFoundException e) {
- //if a table is using some custom I/O format and it's not in the classpath, we won't mark
- //the table for Acid, but today (Hive 3.1 and earlier) OrcInput/OutputFormat is the only
- //Acid format
- LOG.error("Could not determine if " + fullTableName +
- " can be made Acid due to: " + e.getMessage(), e);
- return false;
- }
- return false;
- }
/**
* Generates a set compaction commands to run on pre Hive 3 cluster
*/
@@ -464,29 +326,6 @@ private static void makeCompactionScript(List commands, String scriptLoc
"-- capacity of this queue appropriately");
}
}
- private static void makeConvertTableScript(List alterTableAcid, List alterTableMm,
- String scriptLocation) throws IOException {
- if (alterTableAcid.isEmpty()) {
- LOG.info("No acid conversion is necessary");
- } else {
- String fileName = "convertToAcid_" + System.currentTimeMillis() + ".sql";
- LOG.debug("Writing CRUD conversion commands to " + fileName);
- try(PrintWriter pw = createScript(alterTableAcid, fileName, scriptLocation)) {
- //todo: fix this - it has to run in 3.0 since tables may be unbucketed
- pw.println("-- These commands may be executed by Hive 1.x later");
- }
- }
-
- if (alterTableMm.isEmpty()) {
- LOG.info("No managed table conversion is necessary");
- } else {
- String fileName = "convertToMM_" + System.currentTimeMillis() + ".sql";
- LOG.debug("Writing managed table conversion commands to " + fileName);
- try(PrintWriter pw = createScript(alterTableMm, fileName, scriptLocation)) {
- pw.println("-- These commands must be executed by Hive 3.0 or later");
- }
- }
- }
private static PrintWriter createScript(List commands, String fileName,
String scriptLocation) throws IOException {
@@ -497,17 +336,6 @@ private static PrintWriter createScript(List commands, String fileName,
}
return pw;
}
- private static void makeRenameFileScript(String scriptLocation) throws IOException {
- List commands = Collections.emptyList();
- if (commands.isEmpty()) {
- LOG.info("No file renaming is necessary");
- } else {
- String fileName = "normalizeFileNames_" + System.currentTimeMillis() + ".sh";
- LOG.debug("Writing file renaming commands to " + fileName);
- PrintWriter pw = createScript(commands, fileName, scriptLocation);
- pw.close();
- }
- }
/**
* @return any compaction commands to run for {@code Table t}
*/
@@ -795,11 +623,6 @@ void onWaitForCompaction() throws MetaException {}
static Callback callback;
@VisibleForTesting
static int pollIntervalMs = 1000*30;
- /**
- * Also to enable testing until I set up Maven profiles to be able to run with 3.0 jars
- */
- @VisibleForTesting
- static boolean isTestMode = false;
/**
* can set it from tests to test when config needs something other than default values
*/
diff --git upgrade-acid/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
similarity index 72%
rename from upgrade-acid/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java
rename to upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
index c8964a4a4c..4fe7007c96 100644
--- upgrade-acid/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestUpgradeTool.java
+++ upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
@@ -52,10 +52,10 @@
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
-public class TestUpgradeTool {
- private static final Logger LOG = LoggerFactory.getLogger(TestUpgradeTool.class);
+public class TestPreUpgradeTool {
+ private static final Logger LOG = LoggerFactory.getLogger(TestPreUpgradeTool.class);
private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") +
- File.separator + TestUpgradeTool.class.getCanonicalName() + "-" + System.currentTimeMillis()
+ File.separator + TestPreUpgradeTool.class.getCanonicalName() + "-" + System.currentTimeMillis()
).getPath().replaceAll("\\\\", "/");
private String getTestDataDir() {
@@ -78,6 +78,7 @@ public void testUpgrade() throws Exception {
runStatementOnDriver("create table TAcid (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
runStatementOnDriver("create table TAcidPart (a int, b int) partitioned by (p tinyint) clustered by (b) into 2 buckets stored" +
" as orc TBLPROPERTIES ('transactional'='true')");
+ //on 2.x these are guaranteed to not be acid
runStatementOnDriver("create table TFlat (a int, b int) stored as orc tblproperties('transactional'='false')");
runStatementOnDriver("create table TFlatText (a int, b int) stored as textfile tblproperties('transactional'='false')");
@@ -99,19 +100,19 @@ public void testUpgrade() throws Exception {
//todo: add partitioned table that needs conversion to MM/Acid
//todo: rename files case
- String[] args = {"-location", getTestDataDir(), "-preUpgrade", "-execute"};
- UpgradeTool.callback = new UpgradeTool.Callback() {
+ String[] args = {"-location", getTestDataDir(), "-execute"};
+ PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
@Override
void onWaitForCompaction() throws MetaException {
runWorker(hiveConf);
}
};
- UpgradeTool.pollIntervalMs = 1;
- UpgradeTool.hiveConf = hiveConf;
- UpgradeTool.main(args);
+ PreUpgradeTool.pollIntervalMs = 1;
+ PreUpgradeTool.hiveConf = hiveConf;
+ PreUpgradeTool.main(args);
/*
todo: parse
- target/tmp/org.apache.hadoop.hive.upgrade.acid.TestUpgradeTool-1527286256834/compacts_1527286277624.sql
+ target/tmp/org.apache.hadoop.hive.upgrade.acid.TestPreUpgradeTool-1527286256834/compacts_1527286277624.sql
make sure it's the only 'compacts' file and contains
ALTER TABLE default.tacid COMPACT 'major';
ALTER TABLE default.tacidpart PARTITION(p=10Y) COMPACT 'major';
@@ -125,68 +126,13 @@ void onWaitForCompaction() throws MetaException {
Assert.assertEquals(e.toString(), TxnStore.CLEANING_RESPONSE, e.getState());
}
- String[] args2 = {"-location", getTestDataDir(), "-postUpgrade"};
- UpgradeTool.main(args2);
+ String[] args2 = {"-location", getTestDataDir()};
+ PreUpgradeTool.main(args2);
/*
- * todo: parse
- * convertToAcid_1527286288784.sql make sure it has
- * ALTER TABLE default.tflat SET TBLPROPERTIES ('transactional'='true');
- * convertToMM_1527286288784.sql make sure it has
- * ALTER TABLE default.tflattext SET TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only');
+ * todo: parse compacts script - make sure there is nothing in it
* */
}
- /**
- * includes 'execute' for postUpgrade
- * @throws Exception
- */
- @Test
- public void testPostUpgrade() throws Exception {
- int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}};
- hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "dynamic");
- runStatementOnDriver("drop table if exists TAcid");
- runStatementOnDriver("drop table if exists TAcidPart");
- runStatementOnDriver("drop table if exists TFlat");
- runStatementOnDriver("drop table if exists TFlatText");
-
- runStatementOnDriver("create table TAcid (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')");
- runStatementOnDriver("create table TAcidPart (a int, b int) partitioned by (p int) clustered by (b) into 2 buckets stored" +
- " as orc TBLPROPERTIES ('transactional'='false')");
- //to create some partitions
- runStatementOnDriver("insert into TAcidPart partition(p)" + makeValuesClause(dataPart));
-
-
- //todo: to test these need to link against 3.x libs - maven profiles?
- //runStatementOnDriver("create table TFlat (a int, b int) stored as orc tblproperties('transactional'='false')");
- //runStatementOnDriver("create table TFlatText (a int, b int) stored as textfile tblproperties('transactional'='false')");
-
- Hive db = Hive.get(hiveConf);
- Table tacid = db.getTable("default", "tacid");
- Assert.assertEquals("Expected TAcid to become full acid", false, AcidUtils.isAcidTable(tacid));
- Table tacidpart = db.getTable("default", "tacidpart");
- Assert.assertEquals("Expected TAcidPart to become full acid", false,
- AcidUtils.isAcidTable(tacidpart));
-
-
- String[] args2 = {"-location", getTestDataDir(), "-postUpgrade", "-execute"};
- UpgradeTool.isTestMode = true;
- UpgradeTool.hiveConf = hiveConf;
- UpgradeTool.main(args2);
-
- tacid = db.getTable("default", "tacid");
- Assert.assertEquals("Expected TAcid to become full acid", true, AcidUtils.isAcidTable(tacid));
- tacidpart = db.getTable("default", "tacidpart");
- Assert.assertEquals("Expected TAcidPart to become full acid", true,
- AcidUtils.isAcidTable(tacidpart));
-
- /**
- todo: parse
- target/tmp/org.apache.hadoop.hive.upgrade.acid.TestUpgradeTool-1527286026461/convertToAcid_1527286063065.sql
- make sure it has:
- ALTER TABLE default.tacid SET TBLPROPERTIES ('transactional'='true');
- ALTER TABLE default.tacidpart SET TBLPROPERTIES ('transactional'='true');
- */
- }
private static void runWorker(HiveConf hiveConf) throws MetaException {
AtomicBoolean stop = new AtomicBoolean(true);
Worker t = new Worker();