diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index d3af0ed958..9b440f33f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -36,6 +36,8 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
import org.apache.hadoop.hive.ql.parse.EximUtil;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
@@ -53,7 +55,6 @@ Licensed to the Apache Software Foundation (ASF) under one
import org.apache.hadoop.hive.ql.parse.repl.dump.log.BootstrapDumpLogger;
import org.apache.hadoop.hive.ql.parse.repl.dump.log.IncrementalDumpLogger;
import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -239,12 +240,16 @@ private Path dumpDbMetadata(String dbName, Path dumpRoot) throws Exception {
private void dumpTable(String dbName, String tblName, Path dbRoot) throws Exception {
try {
Hive db = getHive();
- TableSpec ts = new TableSpec(db, conf, dbName + "." + tblName, null);
+ HiveWrapper.Tuple&lt;Table&gt; tuple = new HiveWrapper(db, dbName).table(tblName);
+ Partition partition = db.getPartition(tuple.object, null, false);
+ TableSpec tableSpec = new TableSpec(tuple.object, partition);
TableExport.Paths exportPaths =
new TableExport.Paths(work.astRepresentationForErrorMsg, dbRoot, tblName, conf);
String distCpDoAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER);
- new TableExport(exportPaths, ts, getNewReplicationSpec(), db, distCpDoAsUser, conf).write();
- replLogger.tableLog(tblName, ts.tableHandle.getTableType());
+ tuple.replicationSpec.setIsReplace(true); // by default for all other objects this is false
+ new TableExport(exportPaths, tableSpec, tuple.replicationSpec, db, distCpDoAsUser, conf).write();
+
+ replLogger.tableLog(tblName, tableSpec.tableHandle.getTableType());
} catch (InvalidTableException te) {
// Bootstrap dump shouldn't fail if the table is dropped/renamed while dumping it.
// Just log a debug message and skip it.
@@ -252,13 +257,6 @@ private void dumpTable(String dbName, String tblName, Path dbRoot) throws Except
}
}
- private ReplicationSpec getNewReplicationSpec() throws TException {
- ReplicationSpec rspec = getNewReplicationSpec("replv2", "will-be-set");
- rspec.setCurrentReplicationState(String.valueOf(getHive().getMSC()
- .getCurrentNotificationEventId().getEventId()));
- return rspec;
- }
-
private ReplicationSpec getNewReplicationSpec(String evState, String objState) {
return new ReplicationSpec(true, false, false, evState, objState,
false, true, true);
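
Note (not part of the patch): the net effect of the ReplDumpTask hunks is that the task no longer builds its own ReplicationSpec from the metastore's current notification event id; HiveWrapper now hands back the table and its spec together. A minimal sketch of the resulting bootstrap dump sequence, with "default"/"src" as placeholder database and table names and exportPaths, distCpDoAsUser, and conf assumed in scope:

    // Illustrative sketch only, assembled from the hunks above.
    Hive db = getHive();
    HiveWrapper.Tuple<Table> tuple = new HiveWrapper(db, "default").table("src");
    // null partSpec: no partition handle, so the TableSpec below is TABLE_ONLY
    Partition partition = db.getPartition(tuple.object, null, false);
    TableSpec tableSpec = new TableSpec(tuple.object, partition);
    tuple.replicationSpec.setIsReplace(true); // table data always replaces on load
    new TableExport(exportPaths, tableSpec, tuple.replicationSpec, db, distCpDoAsUser, conf).write();
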
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 136e951475..96defd106e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -24,6 +24,7 @@
import java.sql.Date;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -1110,20 +1111,28 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast)
this(db, conf, ast, true, false);
}
- public TableSpec(Hive db, HiveConf conf, String tableName, Map&lt;String, String&gt; partSpec)
- throws HiveException {
- this.tableName = tableName;
- this.partSpec = partSpec;
- this.tableHandle = db.getTable(tableName);
- if (partSpec != null) {
- this.specType = SpecType.STATIC_PARTITION;
- this.partHandle = db.getPartition(tableHandle, partSpec, false);
- this.partitions = Arrays.asList(partHandle);
+ public TableSpec(Table table, Partition partition) {
+ initialize(table, partition);
+ }
+
+ private void initialize(final Table table, final Partition partition) {
+ tableHandle = table;
+ tableName = table.getDbName() + "." + table.getTableName();
+ if (partition == null) {
+ specType = SpecType.TABLE_ONLY;
} else {
- this.specType = SpecType.TABLE_ONLY;
+ partHandle = partition;
+ partitions = Collections.singletonList(partHandle);
+ specType = SpecType.STATIC_PARTITION;
}
}
+ public TableSpec(Hive db, String tableName, Map&lt;String, String&gt; partSpec)
+ throws HiveException {
+ Table table = db.getTable(tableName);
+ initialize(table, partSpec == null ? null : db.getPartition(table, partSpec, false));
+ }
+
public TableSpec(Table tableHandle, List&lt;Partition&gt; partitions)
throws HiveException {
this.tableHandle = tableHandle;
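
Note (not part of the patch): for callers of the refactored TableSpec, the split is that the new (Table, Partition) constructor takes already-fetched metadata objects with no metastore round trip, while the (Hive, String, Map) overload still does the lookups. A minimal sketch, assuming table, partition, db, and partSpec are in scope and "default.src" is a placeholder dotted name:

    // Illustrative sketch only, based on the constructors in this hunk.
    TableSpec wholeTable = new TableSpec(table, null);          // specType == SpecType.TABLE_ONLY
    TableSpec onePartition = new TableSpec(table, partition);   // specType == SpecType.STATIC_PARTITION
    TableSpec viaLookup = new TableSpec(db, "default.src", partSpec); // fetches the table, and the partition if partSpec != null
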
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a054abb127..230ca47e4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -51,7 +51,6 @@
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask;
-import org.apache.hadoop.hive.ql.exec.FunctionInfo;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -1705,7 +1704,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast,
Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
mergeDesc.setOutputDir(queryTmpdir);
LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
- partSpec == null ? new HashMap&lt;String, String&gt;() : partSpec);
+ partSpec == null ? new HashMap<>() : partSpec);
ltd.setLbCtx(lbCtx);
Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
conf);
@@ -1715,8 +1714,8 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast,
StatsWork statDesc;
if (oldTblPartLoc.equals(newTblPartLoc)) {
// If we're merging to the same location, we can avoid some metastore calls
- TableSpec tablepart = new TableSpec(db, conf, tableName, partSpec);
- statDesc = new StatsWork(tablepart);
+ TableSpec tableSpec = new TableSpec(db, tableName, partSpec);
+ statDesc = new StatsWork(tableSpec);
} else {
statDesc = new StatsWork(ltd);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
index 6a05ea416e..93120354c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
@@ -50,7 +50,6 @@ public ReplicationSpec fromMetaStore() throws HiveException {
return replicationSpec;
} catch (Exception e) {
throw new SemanticException(e);
- // TODO : simple wrap & rethrow for now, clean up with error codes
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
index 27a6ea6c0e..7edfc6a025 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
/**
@@ -49,6 +50,10 @@ public HiveWrapper(Hive db, String dbName) {
return new Tuple<>(functionForSpec, () -> db.getDatabase(dbName));
}
+ public Tuple&lt;Table&gt; table(final String tableName) throws HiveException {
+ return new Tuple<>(functionForSpec, () -> db.getTable(dbName, tableName));
+ }
+
public static class Tuple&lt;T&gt; {
interface Function&lt;T&gt; {
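
Note (not part of the patch): Tuple pairs the fetched object with a ReplicationSpec produced by the same functionForSpec, so a dump obtains both from one call. A minimal usage sketch; the database() accessor name alongside the new table() is an assumption inferred from the hunk above:

    // Illustrative sketch only; database() is assumed from the surrounding file.
    HiveWrapper wrapper = new HiveWrapper(db, "default");
    HiveWrapper.Tuple<Database> dbTuple = wrapper.database();
    HiveWrapper.Tuple<Table> tblTuple = wrapper.table("src");
    ReplicationSpec spec = tblTuple.replicationSpec; // spec and object are created together
    Table table = tblTuple.object;
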