diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 20637c0575..23433a5ca7 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -94,6 +94,11 @@
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
    <dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java
index 144afe3c7a..b18a80a70a 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java
@@ -25,7 +25,7 @@
import java.util.Collections;
import java.util.List;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java
index b06b44aa5c..b64dac994e 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java
@@ -20,7 +20,7 @@
import java.util.Collections;
import java.util.List;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants;
import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException;
import org.apache.hadoop.hive.serde.serdeConstants;
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java
index b2082e8c3c..450ecda5cb 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java
@@ -16,7 +16,7 @@
*/
package org.apache.hadoop.hive.accumulo.columns;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants;
import com.google.common.base.Preconditions;
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
index 6a566182c3..dfa9903615 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
@@ -52,11 +52,11 @@
import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -260,9 +260,9 @@ protected Object generateRanges(Configuration conf, ColumnMapper columnMapper,
String hiveRowIdColumnName, ExprNodeDesc root) {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler,
columnMapper.getRowIdMapping(), hiveRowIdColumnName);
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    List<Node> roots = new ArrayList<Node>();
    roots.add(root);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
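For context, a sketch (not part of this hunk) of how the renamed walker types are driven once built; it follows the existing Hive graph-walker API (DefaultGraphWalker.startWalking) and the way generateRanges consumes nodeOutput, so the exact error handling in the real method may differ:

    // Hedged sketch: dispatch rangeGenerator over the expression tree rooted at `root`
    // and return the per-node result computed for it.
    try {
      ogw.startWalking(roots, nodeOutput);
    } catch (SemanticException ex) {
      throw new RuntimeException(ex);
    }
    return nodeOutput.get(root);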
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
index 17963820ed..fd4a8ccf5d 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.hive.accumulo.predicate.compare.LessThanOrEqual;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -45,7 +45,6 @@
import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.UTF8;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,7 +59,7 @@
/**
*
*/
-public class AccumuloRangeGenerator implements NodeProcessor {
+public class AccumuloRangeGenerator implements SemanticNodeProcessor {
private static final Logger LOG = LoggerFactory.getLogger(AccumuloRangeGenerator.class);
private final AccumuloPredicateHandler predicateHandler;
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java
index e5f1b97bbd..dc449e4914 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java
@@ -20,7 +20,7 @@
import java.util.Iterator;
import java.util.List;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants;
import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException;
import org.apache.hadoop.hive.serde.serdeConstants;
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
index 4975fa0d5e..0b7855678a 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
@@ -24,22 +24,20 @@
import org.apache.hadoop.hive.accumulo.TestAccumuloDefaultIndexScanner;
import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
import org.apache.hadoop.hive.common.type.Date;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToString;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
@@ -47,7 +45,6 @@
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
-import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.junit.Assert;
import org.junit.Before;
@@ -113,9 +110,9 @@ public void testRangeConjunction() throws Exception {
.asList(new Range(new Key("f"), true, new Key("m\0"), false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -168,9 +165,9 @@ public void testRangeDisjunction() throws Exception {
List expectedRanges = Arrays.asList(new Range());
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -241,9 +238,9 @@ public void testRangeConjunctionWithDisjunction() throws Exception {
List expectedRanges = Arrays.asList(new Range(new Key("q"), true, null, false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -296,9 +293,9 @@ public void testPartialRangeConjunction() throws Exception {
List expectedRanges = Arrays.asList(new Range(new Key("f"), true, null, false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -354,9 +351,9 @@ public void testDateRangeConjunction() throws Exception {
"2014-07-01"), false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -401,9 +398,9 @@ public void testCastExpression() throws Exception {
new GenericUDFOPEqualOrGreaterThan(), Arrays.asList(key, cast));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "key");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(node);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -450,9 +447,9 @@ public void testRangeOverNonRowIdField() throws Exception {
new GenericUDFOPAnd(), bothFilters);
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -500,9 +497,9 @@ public void testRangeOverStringIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -558,9 +555,9 @@ public void testRangeOverIntegerIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(both);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -598,9 +595,9 @@ public void testRangeOverBooleanIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
+    SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(node);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java
index ed28e18140..abc0ee6024 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java
@@ -19,7 +19,7 @@
import java.util.Arrays;
import java.util.Properties;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;
import org.slf4j.Logger;
diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index 7c552621f2..17bbbd770e 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -153,7 +153,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -169,7 +169,7 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -314,7 +314,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -330,7 +330,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
@@ -564,7 +564,7 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
@@ -603,7 +603,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col2 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col2 (type: double)
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
@@ -611,7 +611,7 @@ STAGE PLANS:
TableScan
Reduce Output Operator
key expressions: _col1 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col1 (type: double)
Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 8f47323700..338b105a2d 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -52,6 +52,8 @@
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -1220,71 +1222,93 @@ private boolean execute(String line, boolean call, boolean entireLineAsCommand)
return true;
}
+ private enum SectionType {
+ SINGLE_QUOTED, DOUBLE_QUOTED, LINE_COMMENT, BLOCK_COMMENT
+ }
+
/**
* Helper method to parse input from Beeline and convert it to a {@link List} of commands that
* can be executed. This method contains logic for handling delimiters that are placed within
* quotations. It iterates through each character in the line and checks to see if it is the delimiter, ',
* or "
*/
-  private List<String> getCmdList(String line, boolean entireLineAsCommand) {
-    List<String> cmdList = new ArrayList<String>();
+  List<String> getCmdList(String line, boolean entireLineAsCommand) {
if (entireLineAsCommand) {
- cmdList.add(line);
- } else {
- StringBuilder command = new StringBuilder();
-
- // Marker to track if there is starting double quote without an ending double quote
- boolean hasUnterminatedDoubleQuote = false;
+ return Stream.of(line).collect(Collectors.toList());
+ }
+    List<String> cmdList = new ArrayList<>();
+ StringBuilder command = new StringBuilder();
- // Marker to track if there is starting single quote without an ending double quote
- boolean hasUnterminatedSingleQuote = false;
+ // Marker to track if there is a special section open
+ SectionType sectionType = null;
- // Index of the last seen delimiter in the given line
- int lastDelimiterIndex = 0;
+ // Index of the last seen delimiter in the given line
+ int lastDelimiterIndex = 0;
- // Marker to track if the previous character was an escape character
- boolean wasPrevEscape = false;
+ // Marker to track if the previous character was an escape character
+ boolean wasPrevEscape = false;
- int index = 0;
+ int index = 0;
- // Iterate through the line and invoke the addCmdPart method whenever the delimiter is seen that is not inside a
- // quoted string
- for (; index < line.length();) {
- if (line.startsWith("\'", index)) {
- // If a single quote is seen and the index is not inside a double quoted string and the previous character
- // was not an escape, then update the hasUnterminatedSingleQuote flag
- if (!hasUnterminatedDoubleQuote && !wasPrevEscape) {
- hasUnterminatedSingleQuote = !hasUnterminatedSingleQuote;
- }
- wasPrevEscape = false;
- index++;
- } else if (line.startsWith("\"", index)) {
- // If a double quote is seen and the index is not inside a single quoted string and the previous character
- // was not an escape, then update the hasUnterminatedDoubleQuote flag
- if (!hasUnterminatedSingleQuote && !wasPrevEscape) {
- hasUnterminatedDoubleQuote = !hasUnterminatedDoubleQuote;
- }
- wasPrevEscape = false;
- index++;
- } else if (line.startsWith(beeLine.getOpts().getDelimiter(), index)) {
- // If the delimiter is seen, and the line isn't inside a quoted string, then treat
- // line[lastDelimiterIndex] to line[index] as a single command
- if (!hasUnterminatedDoubleQuote && !hasUnterminatedSingleQuote) {
- addCmdPart(cmdList, command, line.substring(lastDelimiterIndex, index));
- lastDelimiterIndex = index + beeLine.getOpts().getDelimiter().length();
- }
- wasPrevEscape = false;
- index += beeLine.getOpts().getDelimiter().length();
- } else {
- wasPrevEscape = line.startsWith("\\", index) && !wasPrevEscape;
- index++;
- }
- }
- // If the line doesn't end with the delimiter or if the line is empty, add the cmd part
- if (lastDelimiterIndex != index || line.length() == 0) {
+ // Iterate through the line and invoke the addCmdPart method whenever the delimiter is seen that is not inside a
+ // quoted string
+ for (; index < line.length();) {
+ if (!wasPrevEscape && sectionType == null && line.startsWith("'", index)) {
+ // Opening non-escaped single quote
+ sectionType = SectionType.SINGLE_QUOTED;
+ index++;
+ } else if (!wasPrevEscape && sectionType == SectionType.SINGLE_QUOTED && line.startsWith("'", index)) {
+ // Closing non-escaped single quote
+ sectionType = null;
+ index++;
+ } else if (!wasPrevEscape && sectionType == null && line.startsWith("\"", index)) {
+ // Opening non-escaped double quote
+ sectionType = SectionType.DOUBLE_QUOTED;
+ index++;
+ } else if (!wasPrevEscape && sectionType == SectionType.DOUBLE_QUOTED && line.startsWith("\"", index)) {
+ // Closing non-escaped double quote
+ sectionType = null;
+ index++;
+ } else if (sectionType == null && line.startsWith("--", index)) {
+ // Opening line comment with (non-escapable?) double-dash
+ sectionType = SectionType.LINE_COMMENT;
+ wasPrevEscape = false;
+ index += 2;
+ } else if (sectionType == SectionType.LINE_COMMENT && line.startsWith("\n", index)) {
+ // Closing line comment with (non-escapable?) newline
+ sectionType = null;
+ wasPrevEscape = false;
+ index++;
+ } else if (sectionType == null && line.startsWith("/*", index)) {
+ // Opening block comment with (non-escapable?) /*
+ sectionType = SectionType.BLOCK_COMMENT;
+ wasPrevEscape = false;
+ index += 2;
+ } else if (sectionType == SectionType.BLOCK_COMMENT && line.startsWith("*/", index)) {
+        // Closing block comment with (non-escapable?) */
+ sectionType = null;
+ wasPrevEscape = false;
+ index += 2;
+ } else if (line.startsWith("\\", index)) {
+ // Escape character seen (anywhere)
+ wasPrevEscape = !wasPrevEscape;
+ index++;
+ } else if (sectionType == null && line.startsWith(beeLine.getOpts().getDelimiter(), index)) {
+ // If the delimiter is seen, and the line isn't inside a section, then treat
+ // line[lastDelimiterIndex] to line[index] as a single command
addCmdPart(cmdList, command, line.substring(lastDelimiterIndex, index));
+ index += beeLine.getOpts().getDelimiter().length();
+ lastDelimiterIndex = index;
+ wasPrevEscape = false;
+ } else {
+ wasPrevEscape = false;
+ index++;
}
}
+ // If the line doesn't end with the delimiter or if the line is empty, add the cmd part
+ if (lastDelimiterIndex != index || line.length() == 0) {
+ addCmdPart(cmdList, command, line.substring(lastDelimiterIndex, index));
+ }
return cmdList;
}
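The effect of the section-aware splitter is easiest to see on inputs that embed the delimiter inside quotes or comments. The calls below are lifted from the TestCommands cases added later in this patch (test-style usage; `commands` is a Commands instance as in those tests):

    // Semicolons inside quoted strings, line comments, and block comments no longer split commands.
    assertEquals(Arrays.asList("select 'foo;bar'"),
        commands.getCmdList("select 'foo;bar';", false));
    assertEquals(Arrays.asList("select 1 -- invalid;\nselect 2"),
        commands.getCmdList("select 1 -- invalid;\nselect 2;", false));
    assertEquals(Arrays.asList("select 1", " select /* ; */ 2", " select /* ; */ 3"),
        commands.getCmdList("select 1; select /* ; */ 2; select /* ; */ 3;", false));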
diff --git a/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java b/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
index d425679b8b..968bdf718e 100644
--- a/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
+++ b/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
@@ -23,8 +23,8 @@
package org.apache.hive.beeline;
import org.apache.commons.io.output.StringBuilderWriter;
-import org.apache.commons.lang.BooleanUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.BooleanUtils;
+import org.apache.commons.lang3.StringUtils;
import org.supercsv.encoder.CsvEncoder;
import org.supercsv.encoder.DefaultCsvEncoder;
import org.supercsv.encoder.SelectiveCsvEncoder;
diff --git a/beeline/src/test/org/apache/hive/beeline/TestCommands.java b/beeline/src/test/org/apache/hive/beeline/TestCommands.java
index 567ca25270..2145b5c83a 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestCommands.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestCommands.java
@@ -18,12 +18,13 @@
package org.apache.hive.beeline;
-import org.junit.Test;
-
import static org.apache.hive.common.util.HiveStringUtils.removeComments;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
+import java.util.Arrays;
+
+import org.junit.Test;
public class TestCommands {
@@ -59,5 +60,286 @@ public void testBeelineCommands() throws IOException {
BeeLine.mainWithInputRedirection(
new String[] {"-u", "jdbc:hive2://", "-e", "create table t1(x int); show tables"}, null);
}
+
+ /**
+ * Test {@link Commands#getCmdList(String, boolean)} with various nesting of special characters:
+ * apostrophe, quotation mark, newline, comment start, semicolon.
+ * @throws Exception
+ */
+ @Test
+ public void testGetCmdList() throws Exception {
+ BeeLine beeline = new BeeLine();
+ Commands commands = new Commands(beeline);
+
+ try {
+ // COMMANDS, WHITE SPACES
+
+ // trivial
+ assertEquals(
+ Arrays.asList(""),
+ commands.getCmdList("", false)
+ );
+ assertEquals(
+ Arrays.asList(""),
+ commands.getCmdList(";", false)
+ );
+ assertEquals(
+ Arrays.asList(" "),
+ commands.getCmdList(" ;", false)
+ );
+ assertEquals(
+ Arrays.asList("", " "),
+ commands.getCmdList("; ", false)
+ );
+ assertEquals(
+ Arrays.asList(" ", " "),
+ commands.getCmdList(" ; ", false)
+ );
+ assertEquals(
+ Arrays.asList(" ; "),
+ commands.getCmdList(" \\; ", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1"),
+ commands.getCmdList("select 1;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1"),
+ commands.getCmdList("select 1", false)
+ );
+ // add whitespace
+ assertEquals(
+ Arrays.asList(" \n select \n 1 \n "),
+ commands.getCmdList(" \n select \n 1 \n ;", false)
+ );
+ // add whitespace after semicolon
+ assertEquals(
+ Arrays.asList(" \n select 1 \n ", " \n "),
+ commands.getCmdList(" \n select 1 \n ; \n ", false)
+ );
+ // second command
+ assertEquals(
+ Arrays.asList("select 1", "select 2"),
+ commands.getCmdList("select 1;select 2;", false)
+ );
+ // second command, no ending semicolon
+ assertEquals(
+ Arrays.asList("select 1", "select 2"),
+ commands.getCmdList("select 1;select 2", false)
+ );
+ // three commands with whitespaces
+ assertEquals(
+ Arrays.asList(" \n select \t 1", "\tselect\n2\r", " select\n3", " "),
+ commands.getCmdList(" \n select \t 1;\tselect\n2\r; select\n3; ", false)
+ );
+
+ // ADD STRINGS
+
+ // trivial string
+ assertEquals(
+ Arrays.asList("select 'foo'"),
+ commands.getCmdList("select 'foo';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"foo\""),
+ commands.getCmdList("select \"foo\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 'foo'", " select 2"),
+ commands.getCmdList("select 'foo'; select 2;", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"foo\"", " select 2"),
+ commands.getCmdList("select \"foo\"; select 2", false)
+ );
+ assertEquals(
+ Arrays.asList("select ''", " select \"\""),
+ commands.getCmdList("select ''; select \"\"", false)
+ );
+ // string containing delimiter of other string
+ assertEquals(
+ Arrays.asList("select 'foo\"bar'"),
+ commands.getCmdList("select 'foo\"bar';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"foo'bar\""),
+ commands.getCmdList("select \"foo'bar\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 'foo\"bar'", " select 'foo\"bar'"),
+ commands.getCmdList("select 'foo\"bar'; select 'foo\"bar';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"foo'bar\"", " select \"foo'bar\""),
+ commands.getCmdList("select \"foo'bar\"; select \"foo'bar\"", false)
+ );
+ assertEquals(
+ Arrays.asList("select '\"' ", " select \"'\" "),
+ commands.getCmdList("select '\"' ; select \"'\" ;", false)
+ );
+ // string containing semicolon
+ assertEquals(
+ Arrays.asList("select 'foo;bar'"),
+ commands.getCmdList("select 'foo;bar';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"foo;bar\""),
+ commands.getCmdList("select \"foo;bar\";", false)
+ );
+ // two selects of strings vs. one select containing semicolon
+ assertEquals(
+ Arrays.asList("select '\"foobar'", " select 'foobar\"'"),
+ commands.getCmdList("select '\"foobar'; select 'foobar\"';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"'foobar'; select 'foobar'\""),
+ commands.getCmdList("select \"'foobar'; select 'foobar'\";", false)
+ );
+ // newline within strings
+ assertEquals(
+ Arrays.asList("select 'multi\nline\nstring'", " select 'allowed'"),
+ commands.getCmdList("select 'multi\nline\nstring'; select 'allowed';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"multi\nline\nstring\"", " select \"allowed\""),
+ commands.getCmdList("select \"multi\nline\nstring\"; select \"allowed\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select ';\nselect 1;\n'", " select 'sql within string'"),
+ commands.getCmdList("select ';\nselect 1;\n'; select 'sql within string';", false)
+ );
+ // escaped quotation marks in strings
+ assertEquals(
+ Arrays.asList("select 'fo\\'o'"),
+ commands.getCmdList("select 'fo\\'o';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"fo\\\"o\""),
+ commands.getCmdList("select \"fo\\\"o\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 'fo\\\"o'"),
+ commands.getCmdList("select 'fo\\\"o';", false)
+ );
+ assertEquals(
+ Arrays.asList("select \"fo\\'o\""),
+ commands.getCmdList("select \"fo\\'o\";", false)
+ );
+ // strings ending with backslash
+ assertEquals(
+ Arrays.asList("select 'foo\\\\'", " select \"bar\\\\\""),
+ commands.getCmdList("select 'foo\\\\'; select \"bar\\\\\";", false)
+ );
+
+ // ADD LINE COMMENTS
+
+ // line comments
+ assertEquals(
+ Arrays.asList("select 1", " -- comment\nselect 2", " -- comment\n"),
+ commands.getCmdList("select 1; -- comment\nselect 2; -- comment\n", false)
+ );
+ assertEquals(
+ Arrays.asList("select -- comment\n1", " select -- comment\n2"),
+ commands.getCmdList("select -- comment\n1; select -- comment\n2;", false)
+ );
+ assertEquals(
+ Arrays.asList("select -- comment 1; select -- comment 2;"),
+ commands.getCmdList("select -- comment 1; select -- comment 2;", false)
+ );
+ assertEquals(
+ Arrays.asList("select -- comment\\\n1", " select -- comment\\\n2"),
+ commands.getCmdList("select -- comment\\\n1; select -- comment\\\n2;", false)
+ );
+ // line comments with semicolons
+ assertEquals(
+ Arrays.asList("select 1 -- invalid;\nselect 2"),
+ commands.getCmdList("select 1 -- invalid;\nselect 2;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1 -- valid\n", "select 2"),
+ commands.getCmdList("select 1 -- valid\n;select 2;", false)
+ );
+ // line comments with quotation marks
+ assertEquals(
+ Arrays.asList("select 1 -- v'lid\n", "select 2", "select 3"),
+ commands.getCmdList("select 1 -- v'lid\n;select 2;select 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1 -- v\"lid\n", "select 2", "select 3"),
+ commands.getCmdList("select 1 -- v\"lid\n;select 2;select 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("", "select 1 -- '\n", "select \"'\"", "select 3 -- \"\n", "?"),
+ commands.getCmdList(";select 1 -- '\n;select \"'\";select 3 -- \"\n;?", false)
+ );
+ assertEquals(
+ Arrays.asList("", "select 1 -- ';select \"'\"\n", "select 3 -- \"\n", "?"),
+ commands.getCmdList(";select 1 -- ';select \"'\"\n;select 3 -- \"\n;?", false)
+ );
+
+ // ADD BLOCK COMMENTS
+
+ // block comments with semicolons
+ assertEquals(
+ Arrays.asList("select 1", " select /* */ 2", " select /* */ 3"),
+ commands.getCmdList("select 1; select /* */ 2; select /* */ 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1", " select /* ; */ 2", " select /* ; */ 3"),
+ commands.getCmdList("select 1; select /* ; */ 2; select /* ; */ 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1 /* c1; */", " /**/ select 2 /*/ c3; /*/", " select 3", " /* c4 */"),
+ commands.getCmdList("select 1 /* c1; */; /**/ select 2 /*/ c3; /*/; select 3; /* c4 */", false)
+ );
+ // block comments with line comments
+ assertEquals(
+ Arrays.asList("select 1 --lc /* fake bc\n", "select 2 --lc */\n"),
+ commands.getCmdList("select 1 --lc /* fake bc\n;select 2 --lc */\n;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1 /*bc -- fake lc\n;select 2 --lc */\n"),
+ commands.getCmdList("select 1 /*bc -- fake lc\n;select 2 --lc */\n;", false)
+ );
+ // block comments with quotation marks
+ assertEquals(
+ Arrays.asList("select 1 /* v'lid */", "select 2", "select 3"),
+ commands.getCmdList("select 1 /* v'lid */;select 2;select 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1 /* v\"lid */", "select 2", "select 3"),
+ commands.getCmdList("select 1 /* v\"lid */;select 2;select 3;", false)
+ );
+ assertEquals(
+ Arrays.asList("", "select 1 /* ' */", "select \"'\"", "select 3 /* \" */", "?"),
+ commands.getCmdList(";select 1 /* ' */;select \"'\";select 3 /* \" */;?", false)
+ );
+ assertEquals(
+ Arrays.asList("", "select 1 /*/ ' ;select \"'\" /*/", "select 3 /* \" */", "?"),
+ commands.getCmdList(";select 1 /*/ ' ;select \"'\" /*/;select 3 /* \" */;?", false)
+ );
+
+ // UNTERMINATED STRING, COMMENT
+
+ assertEquals(
+ Arrays.asList("select 1", " -- ;\\';\\\";--; ;/*;*/; '; ';\";\";"),
+ commands.getCmdList("select 1; -- ;\\';\\\";--; ;/*;*/; '; ';\";\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1", " /* ;\\';\\\";--;\n;/*; ; '; ';\";\";"),
+ commands.getCmdList("select 1; /* ;\\';\\\";--;\n;/*; ; '; ';\";\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1", " ' ;\\';\\\";--;\n;/*;*/; ; ;\";\";"),
+ commands.getCmdList("select 1; ' ;\\';\\\";--;\n;/*;*/; ; ;\";\";", false)
+ );
+ assertEquals(
+ Arrays.asList("select 1", " \" ;\\';\\\";--;\n;/*;*/; '; '; ; ;"),
+ commands.getCmdList("select 1; \" ;\\';\\\";--;\n;/*;*/; '; '; ; ;", false)
+ );
+ } finally {
+ beeline.close();
+ }
+ }
}
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index 65062aeed4..cdd08ce7c9 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -37,7 +37,7 @@
import java.util.Map;
import java.util.Set;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 61aca56bac..cef8fde594 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -1034,7 +1034,7 @@ public static URI getURI(String path) throws URISyntaxException {
* @return the list of the file names in the format of URI formats.
*/
  public static Set<String> getJarFilesByPath(String pathString, Configuration conf) {
- if (org.apache.commons.lang.StringUtils.isBlank(pathString)) {
+ if (org.apache.commons.lang3.StringUtils.isBlank(pathString)) {
return Collections.emptySet();
}
    Set<String> result = new HashSet<>();
diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
index 874a3e1274..d409a2133a 100644
--- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
@@ -219,10 +219,13 @@ public static String maskIfPassword(String key, String value) {
* Register logging context so that log system can print QueryId, SessionId, etc for each message
*/
public static void registerLoggingContext(Configuration conf) {
- MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVESESSIONID));
- MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID));
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
+ MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVESESSIONID));
+ MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID));
MDC.put(OPERATIONLOG_LEVEL_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL));
+ l4j.info("Thread context registration is done.");
+ } else {
+ l4j.info("Thread context registration is skipped.");
}
}
@@ -230,7 +233,11 @@ public static void registerLoggingContext(Configuration conf) {
* Unregister logging context
*/
public static void unregisterLoggingContext() {
- MDC.clear();
+    // Remove only the keys we added; don't use clear(), as that would also drop MDC entries registered by other components.
+ MDC.remove(SESSIONID_LOG_KEY);
+ MDC.remove(QUERYID_LOG_KEY);
+ MDC.remove(OPERATIONLOG_LEVEL_KEY);
+ l4j.info("Unregistered logging context.");
}
/**
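For readers unfamiliar with the MDC pattern used above, a small self-contained sketch (key names are illustrative, not Hive's) of why removing individual keys is preferable to MDC.clear() when other layers may also populate the context:

    import org.slf4j.MDC;

    public class MdcScopeSketch {
      public static void main(String[] args) {
        MDC.put("otherLayerKey", "kept");   // e.g. set by a servlet filter, not ours to clear
        MDC.put("queryId", "q-123");        // keys this component registers
        MDC.put("sessionId", "s-456");
        try {
          // log statements here can reference %X{queryId} / %X{sessionId} in the layout
        } finally {
          MDC.remove("queryId");            // targeted cleanup, mirroring unregisterLoggingContext()
          MDC.remove("sessionId");
        }
        // "otherLayerKey" survives; MDC.clear() would have wiped it too.
      }
    }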
diff --git a/common/src/java/org/apache/hadoop/hive/common/cli/HiveFileProcessor.java b/common/src/java/org/apache/hadoop/hive/common/cli/HiveFileProcessor.java
index 62d85605bd..1a6f37188f 100644
--- a/common/src/java/org/apache/hadoop/hive/common/cli/HiveFileProcessor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/cli/HiveFileProcessor.java
@@ -21,7 +21,7 @@
import java.io.BufferedReader;
import java.io.IOException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.IOUtils;
/**
diff --git a/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java b/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
index f6a52e9557..1a0d7e6a27 100644
--- a/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
+++ b/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
@@ -18,10 +18,12 @@
package org.apache.hadoop.hive.common.format.datetime;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.type.Date;
import org.apache.hadoop.hive.common.type.Timestamp;
@@ -45,6 +47,7 @@
import java.util.List;
import java.util.Locale;
import java.util.Map;
+import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -396,15 +399,20 @@
public class HiveSqlDateTimeFormatter implements Serializable {
+ private static final long serialVersionUID = 1L;
+
private static final int LONGEST_TOKEN_LENGTH = 5;
private static final int LONGEST_ACCEPTED_PATTERN = 100; // for sanity's sake
private static final int NANOS_MAX_LENGTH = 9;
+ private static final DateTimeFormatter MONTH_FORMATTER = DateTimeFormatter.ofPattern("MMM");
+
public static final int AM = 0;
public static final int PM = 1;
- private static final DateTimeFormatter MONTH_FORMATTER = DateTimeFormatter.ofPattern("MMM");
public static final DateTimeFormatter DAY_OF_WEEK_FORMATTER = DateTimeFormatter.ofPattern("EEE");
- private String pattern;
-  private List<Token> tokens = new ArrayList<>();
+
+  private final String pattern;
+  private final List<Token> tokens;
+  private final Optional<LocalDateTime> now;
private boolean formatExact = false;
  private static final Map<String, TemporalField> NUMERIC_TEMPORAL_TOKENS =
@@ -485,6 +493,9 @@
* Token representation.
*/
public static class Token implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
TokenType type;
TemporalField temporalField; // for type TEMPORAL e.g. ChronoField.YEAR
TemporalUnit temporalUnit; // for type TIMEZONE e.g. ChronoUnit.HOURS
@@ -536,20 +547,39 @@ public void removeBackslashes() {
}
}
- public HiveSqlDateTimeFormatter(String pattern, boolean forParsing) {
- setPattern(pattern, forParsing);
+ /**
+ * Construct a new instance.
+ *
+ * @param pattern Pattern to use for parsing or formatting
+ * @param forParsing Flag to indicate use of pattern
+ * @throws IllegalArgumentException if pattern is invalid
+ */
+ public HiveSqlDateTimeFormatter(final String pattern, final boolean forParsing) {
+ this(pattern, forParsing, Optional.absent());
}
/**
- * Parse and perhaps verify the pattern.
+ * Construct a new instance. An optional LocalDateTime can be provided when
+ * parsing must populate a field provided in the format string does not
+ * specify the date and time to use. If none is provided, the current
+ * {@link LocalDateTime#now()} will be used for each call to parse and format.
+ *
+ * @param pattern Pattern to use for parsing or formatting
+ * @param forParsing Flag to indicate use of pattern
+ * @param now Set an arbitrary context of the current local time
+ * @throws IllegalArgumentException if pattern is invalid
*/
- private void setPattern(String pattern, boolean forParsing) {
- assert pattern.length() < LONGEST_ACCEPTED_PATTERN : "The input format is too long";
- this.pattern = pattern;
+ @VisibleForTesting
+  HiveSqlDateTimeFormatter(final String pattern, final boolean forParsing, final Optional<LocalDateTime> now) {
+ this.pattern = Objects.requireNonNull(pattern, "Pattern cannot be null");
+ this.now = Objects.requireNonNull(now);
+
+ this.tokens = new ArrayList<>();
+
+ Preconditions.checkArgument(pattern.length() < LONGEST_ACCEPTED_PATTERN, "The input format is too long");
parsePatternToTokens(pattern);
- // throw IllegalArgumentException if pattern is invalid
if (forParsing) {
verifyForParse();
} else {
@@ -759,8 +789,8 @@ private int getTokenStringLength(String candidate) {
private void verifyForParse() {
// create a list of tokens' temporal fields
-    ArrayList<TemporalField> temporalFields = new ArrayList<>();
-    ArrayList<TemporalUnit> timeZoneTemporalUnits = new ArrayList<>();
+    List<TemporalField> temporalFields = new ArrayList<>();
+    List<TemporalUnit> timeZoneTemporalUnits = new ArrayList<>();
int roundYearCount=0, yearCount=0;
boolean containsIsoFields=false, containsGregorianFields=false;
for (Token token : tokens) {
@@ -828,7 +858,7 @@ private void verifyForParse() {
for (TemporalField tokenType : temporalFields) {
if (Collections.frequency(temporalFields, tokenType) > 1) {
throw new IllegalArgumentException(
- "Invalid duplication of format element: multiple " + tokenType.toString()
+ "Invalid duplication of format element: multiple " + tokenType
+ " tokens provided.");
}
}
@@ -934,10 +964,10 @@ private String formatNumericTemporal(int value, Token token) {
value = 12;
}
try {
- output = String.valueOf(value);
+ output = Integer.toString(value);
output = padOrTruncateNumericTemporal(token, output);
} catch (Exception e) {
- throw new IllegalArgumentException("Value: " + value + " couldn't be cast to string.", e);
+ throw new IllegalArgumentException("Value: " + value + " could not be cast to string.", e);
}
}
return output;
@@ -1009,12 +1039,12 @@ private String padOrTruncateNumericTemporal(Token token, String output) {
return output;
}
- public Timestamp parseTimestamp(String fullInput){
+ public Timestamp parseTimestamp(final String fullInput) {
LocalDateTime ldt = LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC);
String substring;
int index = 0;
int value;
- int timeZoneSign = 0, timeZoneHours = 0, timeZoneMinutes = 0;
+ int timeZoneHours = 0, timeZoneMinutes = 0;
int iyyy = 0, iw = 0;
for (Token token : tokens) {
@@ -1032,7 +1062,7 @@ public Timestamp parseTimestamp(String fullInput){
ldt = ldt.with(token.temporalField, value);
} catch (DateTimeException e){
throw new IllegalArgumentException(
- "Value " + value + " not valid for token " + token.toString());
+ "Value " + value + " not valid for token " + token);
}
//update IYYY and IW if necessary
@@ -1048,7 +1078,6 @@ public Timestamp parseTimestamp(String fullInput){
case TIMEZONE:
if (token.temporalUnit == ChronoUnit.HOURS) {
String nextCharacter = fullInput.substring(index, index + 1);
- timeZoneSign = "-".equals(nextCharacter) ? -1 : 1;
if ("-".equals(nextCharacter) || "+".equals(nextCharacter)) {
index++;
}
@@ -1159,7 +1188,7 @@ private String getNextNumericSubstring(String s, int begin, int end, Token token
/**
* Get the integer value of a temporal substring.
*/
- private int parseNumericTemporal(String substring, Token token){
+ private int parseNumericTemporal(String substring, Token token) {
checkFormatExact(substring, token);
// exceptions to the rule
@@ -1174,9 +1203,9 @@ private int parseNumericTemporal(String substring, Token token){
String currentYearString;
if (token.temporalField == ChronoField.YEAR) {
- currentYearString = String.valueOf(LocalDateTime.now().getYear());
+ currentYearString = Integer.toString(this.now.or(LocalDateTime.now()).getYear());
} else {
- currentYearString = String.valueOf(LocalDateTime.now().get(IsoFields.WEEK_BASED_YEAR));
+ currentYearString = Integer.toString(this.now.or(LocalDateTime.now()).get(IsoFields.WEEK_BASED_YEAR));
}
//deal with round years
@@ -1189,7 +1218,7 @@ private int parseNumericTemporal(String substring, Token token){
} else if (valLast2Digits >= 50 && currLast2Digits < 50) {
currFirst2Digits -= 1;
}
- substring = String.valueOf(currFirst2Digits) + substring;
+ substring = Integer.toString(currFirst2Digits) + substring;
} else { // fill in prefix digits with current date
substring = currentYearString.substring(0, 4 - substring.length()) + substring;
}
@@ -1291,7 +1320,7 @@ private void checkFormatExact(String substring, Token token) {
&& !(token.fillMode || token.temporalField == ChronoField.NANO_OF_SECOND)
&& token.length != substring.length()) {
throw new IllegalArgumentException(
- "FX on and expected token length " + token.length + " for token " + token.toString()
+ "FX on and expected token length " + token.length + " for token " + token
+ " does not match substring (" + substring + ") length " + substring.length());
}
}
@@ -1328,8 +1357,8 @@ private int parseSeparator(String fullInput, int index, Token token) {
throw new IllegalArgumentException("Missing separator at index " + index);
}
if (formatExact && !token.string.equals(separatorsFound.toString())) {
- throw new IllegalArgumentException("FX on and separator found: " + separatorsFound.toString()
- + " doesn't match expected separator: " + token.string);
+ throw new IllegalArgumentException("FX on and separator found: " + separatorsFound
+ + " does not match expected separator: " + token.string);
}
return begin + separatorsFound.length();
@@ -1361,10 +1390,11 @@ private boolean isLastCharacterOfSeparator(int index, String string) {
*/
private boolean nextTokenIs(String pattern, Token currentToken) {
// make sure currentToken isn't the last one
- if (tokens.indexOf(currentToken) == tokens.size() - 1) {
+ final int idx = tokens.indexOf(currentToken);
+ if (idx == tokens.size() - 1) {
return false;
}
- Token nextToken = tokens.get(tokens.indexOf(currentToken) + 1);
+ Token nextToken = tokens.get(idx + 1);
pattern = pattern.toLowerCase();
return (isTimeZoneToken(pattern) && TIME_ZONE_TOKENS.get(pattern) == nextToken.temporalUnit
|| isNumericTemporalToken(pattern) && NUMERIC_TEMPORAL_TOKENS.get(pattern) == nextToken.temporalField
@@ -1383,6 +1413,6 @@ public String getPattern() {
}
private static String capitalize(String substring) {
- return WordUtils.capitalize(substring.toLowerCase());
+ return StringUtils.capitalize(substring.toLowerCase());
}
}
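A test-style sketch of the new @VisibleForTesting constructor (assumed usage, not taken from this patch's tests); it relies on Guava's Optional as imported above and shows how pinning "now" makes the fill-from-current-date behavior for short year tokens deterministic:

    LocalDateTime fixedNow = LocalDateTime.of(2019, 6, 1, 0, 0);
    HiveSqlDateTimeFormatter parser =
        new HiveSqlDateTimeFormatter("yy-mm-dd", true, Optional.of(fixedNow));
    // The missing century is filled from fixedNow rather than LocalDateTime.now() -> 2019-06-01.
    Timestamp ts = parser.parseTimestamp("19-06-01");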
diff --git a/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java b/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
index 37cc12d303..767edcf715 100644
--- a/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
+++ b/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
@@ -20,7 +20,7 @@
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import jline.TerminalFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.fusesource.jansi.Ansi;
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
index 2bb2ca24ca..d9ed3e0777 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hive.common.type;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
public abstract class HiveBaseChar {
protected String value;
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
index f0b28c720d..f4600a353b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hive.common.type;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
/**
* HiveChar.
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java b/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
index f2c1493f56..0193aba0f7 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
@@ -21,6 +21,7 @@
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
+import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
@@ -182,6 +183,10 @@ public static Timestamp ofEpochSecond(long epochSecond, int nanos) {
LocalDateTime.ofEpochSecond(epochSecond, nanos, ZoneOffset.UTC));
}
+ public static Timestamp ofEpochSecond(long epochSecond, long nanos, ZoneId zone) {
+ return new Timestamp(LocalDateTime.ofInstant(Instant.ofEpochSecond(epochSecond, nanos), zone));
+ }
+
public static Timestamp ofEpochMilli(long epochMilli) {
return new Timestamp(LocalDateTime
.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC));
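A brief usage sketch (not from the patch) contrasting the new zone-aware overload with the existing UTC-based one; the zone chosen is arbitrary:

    // Same instant, different wall-clock rendering: the existing overload pins the fields to UTC,
    // the new one applies the supplied ZoneId.
    Timestamp inUtc     = Timestamp.ofEpochSecond(1_000_000L, 0);                             // 1970-01-12 13:46:40
    Timestamp inKolkata = Timestamp.ofEpochSecond(1_000_000L, 0L, ZoneId.of("Asia/Kolkata")); // 1970-01-12 19:16:40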
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index e7724f9084..a120b4573d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -22,7 +22,7 @@
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ZooKeeperHiveHelper;
@@ -452,6 +452,11 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
REPLCMRETIAN("hive.repl.cm.retain","24h",
new TimeValidator(TimeUnit.HOURS),
"Time to retain removed files in cmrootdir."),
+ REPLCMENCRYPTEDDIR("hive.repl.cm.encryptionzone.rootdir", ".cmroot",
+ "Root dir for ChangeManager if encryption zones are enabled, used for deleted files."),
+ REPLCMFALLBACKNONENCRYPTEDDIR("hive.repl.cm.nonencryptionzone.rootdir",
+ "/user/${system:user.name}/cmroot/",
+ "Root dir for ChangeManager for non encrypted paths if hive.repl.cmrootdir is encrypted."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
"Inteval for cmroot cleanup thread."),
@@ -2384,6 +2389,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
"would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
HIVE_OPTIMIZE_TOPNKEY("hive.optimize.topnkey", true, "Whether to enable top n key optimizer."),
+ HIVE_MAX_TOPN_ALLOWED("hive.optimize.topnkey.max", 128, "Maximum topN value allowed by top n key optimizer.\n" +
+ "If the LIMIT is greater than this value then top n key optimization won't be used."),
HIVE_SHARED_WORK_OPTIMIZATION("hive.optimize.shared.work", true,
"Whether to enable shared work optimizer. The optimizer finds scan operator over the same table\n" +
@@ -2644,10 +2651,17 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
"In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
"allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
"DROP TABLE etc. when the table is being written to"),
+ HIVE_TXN_NONACID_READ_LOCKS("hive.txn.nonacid.read.locks", true,
+ "Flag to turn off the read locks for non-ACID tables, when set to false.\n" +
+ "Could be exercised to improve the performance of non-ACID tables in clusters where read locking " +
+ "is enabled globally to support ACID. Can cause issues with concurrent DDL operations, or slow S3 writes."),
+ HIVE_TXN_READ_LOCKS("hive.txn.read.locks", true,
+ "Flag to turn off the read locks, when set to false. Although its not recommended, \n" +
+ "but in performance critical scenarios this option may be exercised."),
TXN_OVERWRITE_X_LOCK("hive.txn.xlock.iow", true,
"Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\n" +
- "transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
- "are not hidden by the INSERT OVERWRITE."),
+ "transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
+ "are not hidden by the INSERT OVERWRITE."),
HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", true,
"Whether Hive supports transactional stats (accurate stats for transactional tables)"),
@@ -2751,6 +2765,10 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
"has had a transaction done on it since the last major compaction. So decreasing this\n" +
"value will increase the load on the NameNode."),
+ HIVE_COMPACTOR_REQUEST_QUEUE("hive.compactor.request.queue", 1,
+ "Enables parallelization of the checkForCompaction operation, that includes many file metadata checks\n" +
+ "and may be expensive"),
+
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
"Number of delta directories in a table or partition that will trigger a minor\n" +
"compaction."),
@@ -3366,7 +3384,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal
"launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" +
"Determines the parallelism on each queue."),
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
- false,
+ true,
"This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" +
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
@@ -3661,15 +3679,15 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal
"Whether enable loading UDFs from metastore on demand; this is mostly relevant for\n" +
"HS2 and was the default behavior before Hive 1.2. Off by default."),
- HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
+ HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "15m",
new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
"The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
"Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
- HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
+ HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "4h",
new TimeValidator(TimeUnit.MILLISECONDS),
"Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
- HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d",
+ HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "2h",
new TimeValidator(TimeUnit.MILLISECONDS),
"Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
" With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
@@ -4807,6 +4825,8 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal
new TimeValidator(TimeUnit.SECONDS),
"While scheduled queries are in flight; "
+ "a background update happens periodically to report the actual state of the query"),
+ HIVE_SCHEDULED_QUERIES_CREATE_AS_ENABLED("hive.scheduled.queries.create.as.enabled", true,
+ "This option sets the default behaviour of newly created scheduled queries."),
HIVE_SECURITY_AUTHORIZATION_SCHEDULED_QUERIES_SUPPORTED("hive.security.authorization.scheduled.queries.supported",
false,
"Enable this if the configured authorizer is able to handle scheduled query related calls."),
@@ -5480,7 +5500,9 @@ public static String getVar(Configuration conf, ConfVars var, EncoderDecoder LOG_PREFIX_LENGTH) {
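Aside (illustration only, not part of the patch): the new ConfVars entries added above are read through the standard HiveConf accessors. A minimal sketch, assuming nothing beyond the getBoolVar/getIntVar/getVar helpers that existing entries already use:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class NewConfVarsSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // New read-lock switches introduced above; both default to true.
    boolean nonAcidReadLocks = conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS);
    boolean readLocks = conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS);
    // Cap on LIMIT values eligible for the top-n-key optimization (default 128).
    int maxTopN = conf.getIntVar(ConfVars.HIVE_MAX_TOPN_ALLOWED);
    // ChangeManager root used inside encryption zones (default ".cmroot").
    String cmEncryptedDir = conf.getVar(ConfVars.REPLCMENCRYPTEDDIR);
    System.out.println(readLocks + " " + nonAcidReadLocks + " " + maxTopN + " " + cmEncryptedDir);
  }
}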
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
index ebe64234c0..a28580cba1 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.hive.conf;
import com.google.common.collect.Iterables;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
similarity index 94%
rename from ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
rename to common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 467ce50e6f..8e643fe844 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -15,17 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hive.ql;
-import org.antlr.runtime.tree.Tree;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
import org.apache.hadoop.security.AccessControlException;
import java.io.FileNotFoundException;
@@ -214,8 +209,7 @@
ALTER_COMMAND_FOR_VIEWS(10131, "To alter a view you need to use the ALTER VIEW command."),
ALTER_COMMAND_FOR_TABLES(10132, "To alter a base table you need to use the ALTER TABLE command."),
ALTER_VIEW_DISALLOWED_OP(10133, "Cannot use this form of ALTER on a view"),
- ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for " + AlterTableType.NON_NATIVE_TABLE_ALLOWED +
- " to a non-native table "),
+ ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for {0} to a non-native table {1}", true),
SORTMERGE_MAPJOIN_FAILED(10135,
"Sort merge bucketed join could not be performed. " +
"If you really want to perform the operation, either set " +
@@ -777,88 +771,6 @@ private ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) {
this.format = format ? new MessageFormat(mesg) : null;
}
- private static int getLine(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getToken().getLine();
- }
-
- return getLine((ASTNode) tree.getChild(0));
- }
-
- private static int getCharPositionInLine(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getToken().getCharPositionInLine();
- }
-
- return getCharPositionInLine((ASTNode) tree.getChild(0));
- }
-
- // Dirty hack as this will throw away spaces and other things - find a better
- // way!
- public static String getText(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getText();
- }
- return getText((ASTNode) tree.getChild(tree.getChildCount() - 1));
- }
-
- public String getMsg(ASTNode tree) {
- StringBuilder sb = new StringBuilder();
- renderPosition(sb, tree);
- sb.append(" ");
- sb.append(mesg);
- sb.append(" '");
- sb.append(getText(tree));
- sb.append("'");
- renderOrigin(sb, tree.getOrigin());
- return sb.toString();
- }
-
- static final String LINE_SEP = System.getProperty("line.separator");
-
- public static void renderOrigin(StringBuilder sb, ASTNodeOrigin origin) {
- while (origin != null) {
- sb.append(" in definition of ");
- sb.append(origin.getObjectType());
- sb.append(" ");
- sb.append(origin.getObjectName());
- sb.append(" [");
- sb.append(LINE_SEP);
- sb.append(origin.getObjectDefinition());
- sb.append(LINE_SEP);
- sb.append("] used as ");
- sb.append(origin.getUsageAlias());
- sb.append(" at ");
- ASTNode usageNode = origin.getUsageNode();
- renderPosition(sb, usageNode);
- origin = usageNode.getOrigin();
- }
- }
-
- private static void renderPosition(StringBuilder sb, ASTNode tree) {
- sb.append("Line ");
- sb.append(getLine(tree));
- sb.append(":");
- sb.append(getCharPositionInLine(tree));
- }
- public static String renderPosition(ASTNode n) {
- StringBuilder sb = new StringBuilder();
- ErrorMsg.renderPosition(sb, n);
- return sb.toString();
- }
-
- public String getMsg(Tree tree) {
- return getMsg((ASTNode) tree);
- }
-
- public String getMsg(ASTNode tree, String reason) {
- return getMsg(tree) + ": " + reason;
- }
-
- public String getMsg(Tree tree, String reason) {
- return getMsg((ASTNode) tree, reason);
- }
-
public String getMsg(String reason) {
return mesg + " " + reason;
}
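For reference (outside the patch): with the new trailing true argument, the constructor shown above wraps the message in a java.text.MessageFormat, so the {0}/{1} placeholders of ALTER_TABLE_NON_NATIVE are filled at render time. A minimal sketch using MessageFormat directly; the argument values are hypothetical and the rendering helper is a stand-in, not the ErrorMsg API:

import java.text.MessageFormat;

public class MessageFormatSketch {
  public static void main(String[] args) {
    // Same template string as the new ALTER_TABLE_NON_NATIVE entry above.
    MessageFormat fmt =
        new MessageFormat("ALTER TABLE can only be used for {0} to a non-native table {1}");
    // Hypothetical arguments; a real caller would supply the allowed operations and the table name.
    String rendered = fmt.format(new Object[] {"ADDPROPS, DROPPROPS", "hbase_table_1"});
    System.out.println(rendered);
  }
}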
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
new file mode 100644
index 0000000000..dec7a484b7
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Dispatcher interface for operators. Used in operator graph walking to dispatch
+ * process/visitor functions for operators.
+ */
+public interface Dispatcher {
+
+ /**
+ * Dispatcher function.
+ *
+ * @param nd
+ * operator to process.
+ * @param stack
+ * operator stack to process.
+ * @param nodeOutputs
+ * The argument list of outputs from processing other nodes that are
+ * passed to this dispatcher from the walker.
+ * @return Object The return object from the processing call.
+ * @throws HiveException
+ */
+ Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
+ throws HiveException;
+
+}
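Illustration only (not part of the patch): a minimal sketch of what an implementation of this interface can look like, assuming the Stack<Node> parameter above and the Node.getName() accessor; it simply logs every node it is handed:

package org.apache.hadoop.hive.ql.lib;

import java.util.Stack;

import org.apache.hadoop.hive.ql.metadata.HiveException;

/** Minimal sketch: a Dispatcher that records the name of every node it sees. */
public class LoggingDispatcher implements Dispatcher {
  @Override
  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws HiveException {
    // The stack depth indicates how far down the walk the current node sits.
    System.out.println("visit " + nd.getName() + " at depth " + stack.size());
    return null;
  }
}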
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java b/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
new file mode 100644
index 0000000000..37bb93d63d
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Collection;
+import java.util.HashMap;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Interface for operator graph walker.
+ */
+public interface GraphWalker {
+
+ /**
+ * starting point for walking.
+ *
+ * @param startNodes
+ * list of starting operators
+ * @param nodeOutput
+ * If this parameter is not null, the call to the function returns
+ * the map from node to objects returned by the processors.
+ * @throws HiveException
+ */
+ void startWalking(Collection<Node> startNodes,
+ HashMap<Node, Object> nodeOutput) throws HiveException;
+
+}
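Illustration only (not part of the patch): a minimal depth-first walker built on the Dispatcher interface above. It does not track already-visited nodes, which a production walker would need for shared subtrees; Node.getChildren() is assumed to return the node's children, or null for leaves:

package org.apache.hadoop.hive.ql.lib;

import java.util.Collection;
import java.util.HashMap;
import java.util.Stack;

import org.apache.hadoop.hive.ql.metadata.HiveException;

/** Minimal sketch: a depth-first GraphWalker that hands every node to a single Dispatcher. */
public class SimpleDepthFirstWalker implements GraphWalker {

  private final Dispatcher dispatcher;
  private final Stack<Node> opStack = new Stack<>();

  public SimpleDepthFirstWalker(Dispatcher dispatcher) {
    this.dispatcher = dispatcher;
  }

  @Override
  public void startWalking(Collection<Node> startNodes, HashMap<Node, Object> nodeOutput)
      throws HiveException {
    for (Node nd : startNodes) {
      walk(nd, nodeOutput);
    }
  }

  private void walk(Node nd, HashMap<Node, Object> nodeOutput) throws HiveException {
    opStack.push(nd);
    // Visit children first, then dispatch the current node (post-order).
    if (nd.getChildren() != null) {
      for (Node child : nd.getChildren()) {
        walk(child, nodeOutput);
      }
    }
    Object result = dispatcher.dispatch(nd, opStack);
    if (nodeOutput != null) {
      nodeOutput.put(nd, result);
    }
    opStack.pop();
  }
}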
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Node.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
rename to common/src/java/org/apache/hadoop/hive/ql/lib/Node.java
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
new file mode 100644
index 0000000000..d8d1f5c746
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Base interface for processing operators. The specific processors
+ * can register their own context with the dispatcher.
+ */
+public interface NodeProcessor {
+
+ /**
+ * Generic process for all ops that don't have specific implementations.
+ *
+ * @param nd
+ * operator to process
+ * @param procCtx
+ * operator processor context
+ * @param nodeOutputs
+ * A variable argument list of outputs from other nodes in the walk
+ * @return Object to be returned by the process call
+ * @throws HiveException
+ */
+ Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+ Object... nodeOutputs) throws HiveException;
+}
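Illustration only (not part of the patch): a minimal NodeProcessor sketch that counts how many times each node name is processed; it assumes nothing beyond the interface above and Node.getName():

package org.apache.hadoop.hive.ql.lib;

import java.util.HashMap;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.metadata.HiveException;

/** Minimal sketch: a NodeProcessor that counts processed nodes by name. */
public class CountingProcessor implements NodeProcessor {

  private final Map<String, Integer> counts = new HashMap<>();

  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws HiveException {
    // Increment the counter for this node's name and return the running total.
    counts.merge(nd.getName(), 1, Integer::sum);
    return counts.get(nd.getName());
  }

  public Map<String, Integer> getCounts() {
    return counts;
  }
}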
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
rename to common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
new file mode 100644
index 0000000000..6594c2b5b2
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Rule interface for operators. Used in operator dispatching to dispatch
+ * process/visitor functions for operators.
+ */
+public interface Rule {
+
+ /**
+ * @return the cost of the rule - the lower the cost, the better the rule
+ * matches
+ * @throws HiveException
+ */
+ int cost(Stack<Node> stack) throws HiveException;
+
+ /**
+ * @return the name of the rule - may be useful for debugging
+ */
+ String getName();
+}
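Illustration only (not part of the patch): a minimal Rule sketch. The convention that a negative cost means "no match" mirrors existing rule implementations in ql, but here it is only an assumption:

package org.apache.hadoop.hive.ql.lib;

import java.util.Stack;

import org.apache.hadoop.hive.ql.metadata.HiveException;

/** Minimal sketch: a Rule that fires (cost 1) when the node on top of the stack has a given name. */
public class NameMatchRule implements Rule {

  private final String nodeName;

  public NameMatchRule(String nodeName) {
    this.nodeName = nodeName;
  }

  @Override
  public int cost(Stack<Node> stack) throws HiveException {
    // Lower cost means a better match; -1 is used here to mean "does not match".
    if (!stack.isEmpty() && nodeName.equals(stack.peek().getName())) {
      return 1;
    }
    return -1;
  }

  @Override
  public String getName() {
    return "NameMatchRule(" + nodeName + ")";
  }
}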
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java b/common/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
rename to common/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java b/common/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
rename to common/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java b/common/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
rename to common/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
diff --git a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
index 196b9c457b..22948e38de 100644
--- a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
+++ b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
@@ -40,7 +40,7 @@
import java.util.regex.Pattern;
import com.google.common.base.Splitter;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.translate.CharSequenceTranslator;
import org.apache.commons.lang3.text.translate.EntityArrays;
import org.apache.commons.lang3.text.translate.LookupTranslator;
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java
index 52253f94ac..51a2be2bdf 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -48,7 +48,7 @@
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/common/src/test/org/apache/hadoop/hive/common/format/datetime/TestHiveSqlDateTimeFormatter.java b/common/src/test/org/apache/hadoop/hive/common/format/datetime/TestHiveSqlDateTimeFormatter.java
index 3abf28b0b8..9c9b0bedcf 100644
--- a/common/src/test/org/apache/hadoop/hive/common/format/datetime/TestHiveSqlDateTimeFormatter.java
+++ b/common/src/test/org/apache/hadoop/hive/common/format/datetime/TestHiveSqlDateTimeFormatter.java
@@ -18,31 +18,33 @@
package org.apache.hadoop.hive.common.format.datetime;
-import com.sun.tools.javac.util.List;
-import org.apache.hadoop.hive.common.type.Date;
-import org.apache.hadoop.hive.common.type.Timestamp;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
-import java.time.LocalDate;
+import java.time.Instant;
import java.time.LocalDateTime;
+import java.time.ZoneOffset;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalField;
-import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.hadoop.hive.common.type.Date;
+import org.apache.hadoop.hive.common.type.Timestamp;
import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+
+import com.google.common.base.Optional;
/**
* Tests HiveSqlDateTimeFormatter.
*/
-
public class TestHiveSqlDateTimeFormatter {
private HiveSqlDateTimeFormatter formatter;
@Test
public void testSetPattern() {
- verifyPatternParsing(" ---yyyy-\'-:- -,.;/MM-dd--", new ArrayList<>(List.of(
+ verifyPatternParsing(" ---yyyy-\'-:- -,.;/MM-dd--", Arrays.asList(
null, // represents separator, which has no temporal field
ChronoField.YEAR,
null,
@@ -50,10 +52,10 @@ public void testSetPattern() {
null,
ChronoField.DAY_OF_MONTH,
null
- )));
+ ));
verifyPatternParsing("ymmdddhh24::mi:ss A.M. pm", 25, "ymmdddhh24::mi:ss A.M. pm",
- new ArrayList<>(List.of(
+ Arrays.asList(
ChronoField.YEAR,
ChronoField.MONTH_OF_YEAR,
ChronoField.DAY_OF_YEAR,
@@ -62,7 +64,7 @@ public void testSetPattern() {
null, ChronoField.SECOND_OF_MINUTE,
null, ChronoField.AMPM_OF_DAY,
null, ChronoField.AMPM_OF_DAY
- )));
+ ));
}
@Test
@@ -153,8 +155,10 @@ public void testFormatTimestamp() {
checkFormatTs("YYYY-mm-dd: Q WW W", "2019-03-31 00:00:00", "2019-03-31: 1 13 5");
checkFormatTs("YYYY-mm-dd: Q WW W", "2019-04-01 00:00:00", "2019-04-01: 2 13 1");
checkFormatTs("YYYY-mm-dd: Q WW W", "2019-12-31 00:00:00", "2019-12-31: 4 53 5");
+ }
- //ISO 8601
+ @Test
+ public void testFormatTimestampIso8601() {
checkFormatTs("YYYY-MM-DD : IYYY-IW-ID", "2018-12-31 00:00:00", "2018-12-31 : 2019-01-01");
checkFormatTs("YYYY-MM-DD : IYYY-IW-ID", "2019-01-06 00:00:00", "2019-01-06 : 2019-01-07");
checkFormatTs("YYYY-MM-DD : IYYY-IW-ID", "2019-01-07 00:00:00", "2019-01-07 : 2019-02-01");
@@ -173,7 +177,8 @@ public void testFormatTimestamp() {
}
private void checkFormatTs(String pattern, String input, String expectedOutput) {
- formatter = new HiveSqlDateTimeFormatter(pattern, false);
+ formatter = new HiveSqlDateTimeFormatter(pattern, false,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
assertEquals("Format timestamp to string failed with pattern: " + pattern,
expectedOutput, formatter.format(Timestamp.valueOf(input)));
}
@@ -190,33 +195,31 @@ public void testFormatDate() {
}
private void checkFormatDate(String pattern, String input, String expectedOutput) {
- formatter = new HiveSqlDateTimeFormatter(pattern, false);
+ formatter = new HiveSqlDateTimeFormatter(pattern, false,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
assertEquals("Format date to string failed with pattern: " + pattern,
expectedOutput, formatter.format(Date.valueOf(input)));
}
@Test
public void testParseTimestamp() {
- String thisYearString = String.valueOf(LocalDateTime.now().getYear());
- int firstTwoDigits = getFirstTwoDigits();
-
//y
- checkParseTimestamp("y-mm-dd", "0-02-03", thisYearString.substring(0, 3) + "0-02-03 00:00:00");
- checkParseTimestamp("yy-mm-dd", "00-02-03", thisYearString.substring(0, 2) + "00-02-03 00:00:00");
- checkParseTimestamp("yyy-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03 00:00:00");
- checkParseTimestamp("yyyy-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03 00:00:00");
- checkParseTimestamp("rr-mm-dd", "0-02-03", thisYearString.substring(0, 3) + "0-02-03 00:00:00");
- checkParseTimestamp("rrrr-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03 00:00:00");
+ checkParseTimestamp("y-mm-dd", "0-02-03", "1970-02-03 00:00:00");
+ checkParseTimestamp("yy-mm-dd", "00-02-03", "1900-02-03 00:00:00");
+ checkParseTimestamp("yyy-mm-dd", "000-02-03", "1000-02-03 00:00:00");
+ checkParseTimestamp("yyyy-mm-dd", "000-02-03", "1000-02-03 00:00:00");
+ checkParseTimestamp("rr-mm-dd", "0-02-03", "1970-02-03 00:00:00");
+ checkParseTimestamp("rrrr-mm-dd", "000-02-03", "1000-02-03 00:00:00");
//rr, rrrr
- checkParseTimestamp("rr-mm-dd", "00-02-03", firstTwoDigits + 1 + "00-02-03 00:00:00");
- checkParseTimestamp("rr-mm-dd", "49-02-03", firstTwoDigits + 1 + "49-02-03 00:00:00");
- checkParseTimestamp("rr-mm-dd", "50-02-03", firstTwoDigits + "50-02-03 00:00:00");
- checkParseTimestamp("rr-mm-dd", "99-02-03", firstTwoDigits + "99-02-03 00:00:00");
- checkParseTimestamp("rrrr-mm-dd", "00-02-03", firstTwoDigits + 1 + "00-02-03 00:00:00");
- checkParseTimestamp("rrrr-mm-dd", "49-02-03", firstTwoDigits + 1 + "49-02-03 00:00:00");
- checkParseTimestamp("rrrr-mm-dd", "50-02-03", firstTwoDigits + "50-02-03 00:00:00");
- checkParseTimestamp("rrrr-mm-dd", "99-02-03", firstTwoDigits + "99-02-03 00:00:00");
+ checkParseTimestamp("rr-mm-dd", "00-02-03", "2000-02-03 00:00:00");
+ checkParseTimestamp("rr-mm-dd", "49-02-03", "2049-02-03 00:00:00");
+ checkParseTimestamp("rr-mm-dd", "50-02-03", "1950-02-03 00:00:00");
+ checkParseTimestamp("rr-mm-dd", "99-02-03", "1999-02-03 00:00:00");
+ checkParseTimestamp("rrrr-mm-dd", "00-02-03", "2000-02-03 00:00:00");
+ checkParseTimestamp("rrrr-mm-dd", "49-02-03", "2049-02-03 00:00:00");
+ checkParseTimestamp("rrrr-mm-dd", "50-02-03", "1950-02-03 00:00:00");
+ checkParseTimestamp("rrrr-mm-dd", "99-02-03", "1999-02-03 00:00:00");
//everything else
checkParseTimestamp("yyyy-mm-ddThh24:mi:ss.ff8z", "2018-02-03T04:05:06.5665Z", "2018-02-03 04:05:06.5665");
@@ -230,7 +233,7 @@ public void testParseTimestamp() {
checkParseTimestamp("YYYY-MM-DD HH24:MI TZH:TZM", "2019-1-1 14:00-1:30", "2019-01-01 14:00:00");
checkParseTimestamp("yyyy-mm-dd TZM:TZH", "2019-01-01 1 -3", "2019-01-01 00:00:00");
checkParseTimestamp("yyyy-mm-dd TZH:TZM", "2019-01-01 -0:30", "2019-01-01 00:00:00");
- checkParseTimestamp("TZM/YYY-MM-TZH/DD", "0/333-01-11/02", "2333-01-02 00:00:00");
+ checkParseTimestamp("TZM/YYY-MM-TZH/DD", "0/333-01-11/02", "1333-01-02 00:00:00");
checkParseTimestamp("YYYY-MM-DD HH12:MI AM", "2019-01-01 11:00 p.m.", "2019-01-01 23:00:00");
checkParseTimestamp("YYYY-MM-DD HH12:MI A.M..", "2019-01-01 11:00 pm.", "2019-01-01 23:00:00");
checkParseTimestamp("MI DD-TZM-YYYY-MM TZHPM SS:HH12.FF9",
@@ -267,81 +270,77 @@ public void testParseTimestamp() {
//letters and numbers are delimiters to each other, respectively
checkParseDate("yyyy-ddMONTH", "2018-4March", "2018-03-04");
checkParseDate("yyyy-MONTHdd", "2018-March4", "2018-03-04");
- //ISO 8601
+ }
+
+ @Test
+ public void testParseTimestampISO8601() {
checkParseTimestamp("IYYY-IW-ID", "2019-01-01", "2018-12-31 00:00:00");
checkParseTimestamp("IYYY-IW-ID", "2019-01-07", "2019-01-06 00:00:00");
checkParseTimestamp("IYYY-IW-ID", "2019-02-01", "2019-01-07 00:00:00");
checkParseTimestamp("IYYY-IW-ID", "2019-52-07", "2019-12-29 00:00:00");
checkParseTimestamp("IYYY-IW-ID", "2020-01-01", "2019-12-30 00:00:00");
- checkParseTimestamp("IYYY-IW-ID", "020-01-04", thisYearString.substring(0, 1) + "020-01-02 00:00:00");
- checkParseTimestamp("IYY-IW-ID", "020-01-04", thisYearString.substring(0, 1) + "020-01-02 00:00:00");
- checkParseTimestamp("IYY-IW-ID", "20-01-04", thisYearString.substring(0, 2) + "20-01-02 00:00:00");
- checkParseTimestamp("IY-IW-ID", "20-01-04", thisYearString.substring(0, 2) + "20-01-02 00:00:00");
+ checkParseTimestamp("IYYY-IW-ID", "020-01-04", "1020-01-06 00:00:00");
+ checkParseTimestamp("IYY-IW-ID", "020-01-04", "1020-01-06 00:00:00");
+ checkParseTimestamp("IYY-IW-ID", "20-01-04", "1920-01-01 00:00:00");
+ checkParseTimestamp("IY-IW-ID", "20-01-04", "1920-01-01 00:00:00");
checkParseTimestamp("IYYY-IW-DAY", "2019-01-monday", "2018-12-31 00:00:00");
checkParseTimestamp("IYYY-IW-Day", "2019-01-Sunday", "2019-01-06 00:00:00");
checkParseTimestamp("IYYY-IW-Dy", "2019-02-MON", "2019-01-07 00:00:00");
checkParseTimestamp("IYYY-IW-DY", "2019-52-sun", "2019-12-29 00:00:00");
checkParseTimestamp("IYYY-IW-dy", "2020-01-Mon", "2019-12-30 00:00:00");
- //Tests for these patterns would need changing every decade if done in the above way.
//Thursday of the first week in an ISO year always matches the Gregorian year.
- checkParseTimestampIso("IY-IW-ID", "0-01-04", "iw, yyyy", "01, " + thisYearString.substring(0, 3) + "0");
- checkParseTimestampIso("I-IW-ID", "0-01-04", "iw, yyyy", "01, " + thisYearString.substring(0, 3) + "0");
+ checkParseTimestampIso("IY-IW-ID", "0-01-04", "iw, yyyy", "01, 1970");
+ checkParseTimestampIso("I-IW-ID", "0-01-04", "iw, yyyy", "01, 1970");
//time patterns are allowed; date patterns are not
checkParseTimestamp("IYYY-IW-ID hh24:mi:ss", "2019-01-01 01:02:03", "2018-12-31 01:02:03");
}
- private int getFirstTwoDigits() {
- int thisYear = LocalDateTime.now().getYear();
- int firstTwoDigits = thisYear / 100;
- if (thisYear % 100 < 50) {
- firstTwoDigits -= 1;
- }
- return firstTwoDigits;
- }
-
private void checkParseTimestamp(String pattern, String input, String expectedOutput) {
- formatter = new HiveSqlDateTimeFormatter(pattern, true);
- assertEquals("Parse string to timestamp failed. Pattern: " + pattern,
- Timestamp.valueOf(expectedOutput), formatter.parseTimestamp(input));
+ formatter = new HiveSqlDateTimeFormatter(pattern, true,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
+ assertEquals("Parse string to timestamp failed. Pattern: " + pattern, Timestamp.valueOf(expectedOutput),
+ formatter.parseTimestamp(input));
}
- private void checkParseTimestampIso(String parsePattern, String input, String formatPattern,
- String expectedOutput) {
- formatter = new HiveSqlDateTimeFormatter(parsePattern, true);
+ private void checkParseTimestampIso(String parsePattern, String input, String formatPattern, String expectedOutput) {
+ formatter =
+ new HiveSqlDateTimeFormatter(parsePattern, true,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
Timestamp ts = formatter.parseTimestamp(input);
- formatter = new HiveSqlDateTimeFormatter(formatPattern, false);
+ formatter =
+ new HiveSqlDateTimeFormatter(formatPattern, false,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
assertEquals(expectedOutput, formatter.format(ts));
}
@Test
public void testParseDate() {
-
- String thisYearString = String.valueOf(LocalDateTime.now().getYear());
- int firstTwoDigits = getFirstTwoDigits();
//y
- checkParseDate("y-mm-dd", "0-02-03", thisYearString.substring(0, 3) + "0-02-03");
- checkParseDate("yy-mm-dd", "00-02-03", thisYearString.substring(0, 2) + "00-02-03");
- checkParseDate("yyy-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03");
- checkParseDate("yyyy-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03");
- checkParseDate("rr-mm-dd", "0-02-03", thisYearString.substring(0, 3) + "0-02-03");
- checkParseDate("rrrr-mm-dd", "000-02-03", thisYearString.substring(0, 1) + "000-02-03");
+ checkParseDate("y-mm-dd", "0-02-03", "1970-02-03");
+ checkParseDate("yy-mm-dd", "00-02-03", "1900-02-03");
+ checkParseDate("yyy-mm-dd", "000-02-03", "1000-02-03");
+ checkParseDate("yyyy-mm-dd", "000-02-03", "1000-02-03");
+ checkParseDate("rr-mm-dd", "0-02-03", "1970-02-03");
+ checkParseDate("rrrr-mm-dd", "000-02-03", "1000-02-03");
//rr, rrrr
- checkParseDate("rr-mm-dd", "00-02-03", firstTwoDigits + 1 + "00-02-03");
- checkParseDate("rr-mm-dd", "49-02-03", firstTwoDigits + 1 + "49-02-03");
- checkParseDate("rr-mm-dd", "50-02-03", firstTwoDigits + "50-02-03");
- checkParseDate("rr-mm-dd", "99-02-03", firstTwoDigits + "99-02-03");
- checkParseDate("rrrr-mm-dd", "00-02-03", firstTwoDigits + 1 + "00-02-03");
- checkParseDate("rrrr-mm-dd", "49-02-03", firstTwoDigits + 1 + "49-02-03");
- checkParseDate("rrrr-mm-dd", "50-02-03", firstTwoDigits + "50-02-03");
- checkParseDate("rrrr-mm-dd", "99-02-03", firstTwoDigits + "99-02-03");
+ checkParseDate("rr-mm-dd", "00-02-03", "2000-02-03");
+ checkParseDate("rr-mm-dd", "49-02-03", "2049-02-03");
+ checkParseDate("rr-mm-dd", "50-02-03", "1950-02-03");
+ checkParseDate("rr-mm-dd", "99-02-03", "1999-02-03");
+ checkParseDate("rrrr-mm-dd", "00-02-03", "2000-02-03");
+ checkParseDate("rrrr-mm-dd", "49-02-03", "2049-02-03");
+ checkParseDate("rrrr-mm-dd", "50-02-03", "1950-02-03");
+ checkParseDate("rrrr-mm-dd", "99-02-03", "1999-02-03");
checkParseDate("yyyy-mm-dd hh mi ss.ff7", "2018/01/01 2.2.2.55", "2018-01-01");
checkParseDate("dd/MonthT/yyyy", "31/AugustT/2020", "2020-08-31");
checkParseDate("dd/MonthT/yyyy", "31/MarchT/2020", "2020-03-31");
+ }
- //ISO 8601
+ @Test
+ public void testParseDateISO8601() {
checkParseDate("IYYY-IW-ID", "2019-01-01", "2018-12-31");
checkParseDate("IW-ID-IYYY", "01-02-2019", "2019-01-01");
checkParseDate("ID-IW-IYYY", "02-01-2019", "2019-01-01");
@@ -352,7 +351,8 @@ public void testParseDate() {
}
private void checkParseDate(String pattern, String input, String expectedOutput) {
- formatter = new HiveSqlDateTimeFormatter(pattern, true);
+ formatter = new HiveSqlDateTimeFormatter(pattern, true,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
assertEquals("Parse string to date failed. Pattern: " + pattern,
Date.valueOf(expectedOutput), formatter.parseDate(input));
}
@@ -382,7 +382,8 @@ public void testParseTimestampError() {
private void verifyBadPattern(String string, boolean forParsing) {
try {
- formatter = new HiveSqlDateTimeFormatter(string, forParsing);
+ formatter = new HiveSqlDateTimeFormatter(string, forParsing,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
fail("Bad pattern " + string + " should have thrown IllegalArgumentException but didn't");
} catch (Exception e) {
assertEquals("Expected IllegalArgumentException, got another exception.",
@@ -446,7 +447,7 @@ public void testFx() {
public void testFmFx() {
checkParseTimestamp("FXDD-FMMM-YYYY hh12 am", "01-1-1998 12 PM", "1998-01-01 12:00:00");
checkParseTimestamp("FXFMDD-MM-YYYY hh12 am", "1-01-1998 12 PM", "1998-01-01 12:00:00");
- checkParseTimestamp("FXFMiyyy-iw-id hh24:mi:ss", "019-01-02 17:00:05", "2019-01-01 17:00:05");
+ checkParseTimestamp("FXFMiyyy-iw-id hh24:mi:ss", "019-01-02 17:00:05", "1019-01-05 17:00:05");
verifyBadParseString("FXFMiyyy-iw-id hh24:mi:ss", "019-01-02 17:0:05");
//ff[1-9] unaffected
checkParseTimestamp("FXFMDD-MM-YYYY FMff2", "1-01-1998 4", "1998-01-01 00:00:00.4");
@@ -469,8 +470,8 @@ public void testText() {
// Characters matter upon parsing
verifyBadParseString("\"Year! \"YYYY \"m\" MM \"d\" DD.\"!\"", "Year 3000 m 3 d 1,!");
// non-numeric characters in text counts as a delimiter
- checkParseDate("yyyy\"m\"mm\"d\"dd", "19m1d1", LocalDate.now().getYear() / 100 + "19-01-01");
- checkParseDate("yyyy\"[\"mm\"]\"dd", "19[1]1", LocalDate.now().getYear() / 100 + "19-01-01");
+ checkParseDate("yyyy\"m\"mm\"d\"dd", "19m1d1", "1919-01-01");
+ checkParseDate("yyyy\"[\"mm\"]\"dd", "19[1]1", "1919-01-01");
// parse character temporals correctly
checkParseDate("dd/Month\"arch\"/yyyy", "31/Marcharch/2020", "2020-03-31");
checkParseDate("dd/Month\"ember\"/yyyy", "31/Decemberember/2020", "2020-12-31");
@@ -502,13 +503,14 @@ public void testText() {
* -sum of token.lengths
* -concatenation of token.strings
*/
- private void verifyPatternParsing(String pattern, ArrayList<TemporalField> temporalFields) {
+ private void verifyPatternParsing(String pattern, List<TemporalField> temporalFields) {
verifyPatternParsing(pattern, pattern.length(), pattern.toLowerCase(), temporalFields);
}
private void verifyPatternParsing(String pattern, int expectedPatternLength,
- String expectedPattern, ArrayList<TemporalField> temporalFields) {
- formatter = new HiveSqlDateTimeFormatter(pattern, false);
+ String expectedPattern, List<TemporalField> temporalFields) {
+ formatter = new HiveSqlDateTimeFormatter(pattern, false,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
assertEquals(temporalFields.size(), formatter.getTokens().size());
StringBuilder sb = new StringBuilder();
int actualPatternLength = 0;
@@ -524,7 +526,8 @@ private void verifyPatternParsing(String pattern, int expectedPatternLength,
}
private void verifyBadParseString(String pattern, String string) {
- formatter = new HiveSqlDateTimeFormatter(pattern, true);
+ formatter = new HiveSqlDateTimeFormatter(pattern, true,
+ Optional.of(LocalDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)));
try {
Timestamp output = formatter.parseTimestamp(string);
fail("Parse string to timestamp should have failed.\nString: " + string + "\nPattern: "
diff --git a/contrib/src/test/results/clientpositive/udaf_example_group_concat.q.out b/contrib/src/test/results/clientpositive/udaf_example_group_concat.q.out
index 6846720d95..15dd4c071c 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_group_concat.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_group_concat.q.out
@@ -42,7 +42,7 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 526000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 250 Data size: 526000 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml
index 0c5d030146..d37c1b5dab 100644
--- a/data/conf/llap/hive-site.xml
+++ b/data/conf/llap/hive-site.xml
@@ -373,4 +373,9 @@
org.apache.hadoop.hive.ql.hooks.ScheduledQueryCreationRegistryHook
+
+ hive.users.in.admin.role
+ hive_admin_user
+
+
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 430cc34501..1d7009b5af 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -718,7 +718,7 @@ static int getIntegerProperty(Table table, String propertyName, int defaultVal)
}
String[] vals = values.trim().split(",");
for (String val : vals) {
- if (org.apache.commons.lang.StringUtils.isNotBlank(val)) {
+ if (org.apache.commons.lang3.StringUtils.isNotBlank(val)) {
rv.add(val);
}
}
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
index 2ad6a7f8b0..6cf3ef2562 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -33,7 +33,7 @@
import org.apache.druid.segment.indexing.granularity.GranularitySpec;
import org.apache.druid.segment.realtime.plumber.CustomVersioningPolicy;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.Constants;
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/security/DruidKerberosUtil.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/security/DruidKerberosUtil.java
index 8e10cd7e20..12603c10ec 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/security/DruidKerberosUtil.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/security/DruidKerberosUtil.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.druid.security;
-import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
@@ -33,7 +32,7 @@
import java.net.CookieStore;
import java.net.HttpCookie;
import java.net.URI;
-import java.nio.charset.StandardCharsets;
+import java.util.Base64;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
@@ -42,7 +41,6 @@
*/
public final class DruidKerberosUtil {
protected static final Logger LOG = LoggerFactory.getLogger(DruidKerberosUtil.class);
- private static final Base64 BASE_64_CODEC = new Base64(0);
private static final ReentrantLock KERBEROS_LOCK = new ReentrantLock(true);
private DruidKerberosUtil() {
@@ -78,7 +76,7 @@ static String kerberosChallenge(String server) throws AuthenticationException {
gssContext.dispose();
// Base64 encoded and stringified token for server
LOG.debug("Got valid challenge for host {}", serverName);
- return new String(BASE_64_CODEC.encode(outToken), StandardCharsets.US_ASCII);
+ return Base64.getEncoder().encodeToString(outToken);
} catch (GSSException | IllegalAccessException | NoSuchFieldException | ClassNotFoundException e) {
throw new AuthenticationException(e);
} finally {
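Side note (illustration, not part of the patch): the replacement above should be behavior-preserving, since commons-codec's new Base64(0) produces unchunked output, the same as java.util.Base64's basic encoder. A minimal equivalence sketch:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64Sketch {
  public static void main(String[] args) {
    byte[] token = "example-gss-token".getBytes(StandardCharsets.UTF_8);
    // Stands in for the removed new String(BASE_64_CODEC.encode(outToken), US_ASCII) call.
    String encoded = Base64.getEncoder().encodeToString(token);
    System.out.println(encoded);
  }
}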
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/ColumnMappings.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/ColumnMappings.java
index f1887b5499..2f09662a4b 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/ColumnMappings.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/ColumnMappings.java
@@ -29,7 +29,7 @@
import java.util.List;
import java.util.Properties;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.SerDeException;
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
index 4fa0272951..415adca674 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
@@ -27,7 +27,7 @@
import java.util.SortedMap;
import java.util.TreeMap;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index a32ef81a7b..9bd2f49fe5 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -152,7 +152,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -169,7 +169,7 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -314,7 +314,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -330,7 +330,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
@@ -565,7 +565,7 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
@@ -604,7 +604,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col2 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col2 (type: double)
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
@@ -612,7 +612,7 @@ STAGE PLANS:
TableScan
Reduce Output Operator
key expressions: _col1 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col1 (type: double)
Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java
index 08e328a173..930e11ea8b 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java
@@ -36,7 +36,7 @@
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.Parser;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.io.SessionStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index 084bbfeee7..9b66e6be74 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -23,7 +23,7 @@
import java.util.List;
import java.util.Map;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.ddl.DDLDesc;
@@ -72,7 +72,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
for (int num = 1; num < numCh; num++) {
ASTNode child = (ASTNode) ast.getChild(num);
if (format.fillStorageFormat(child)) {
- if (org.apache.commons.lang.StringUtils
+ if (org.apache.commons.lang3.StringUtils
.isNotEmpty(format.getStorageHandler())) {
return ast;
}
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 865aae6bca..f92478c48b 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -28,8 +28,8 @@
import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc;
import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc;
import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc;
-import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc;
-import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.drop.AlterTableDropPartitionDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.show.ShowPartitionsDesc;
import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableSetLocationDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index e851d2a6f4..b0b3276893 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -37,7 +37,7 @@
import com.google.common.collect.Maps;
import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
index e18dae983b..d786e3c482 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
@@ -30,8 +30,8 @@
import javax.security.auth.login.LoginException;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
index 350221528e..30af54f115 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
@@ -20,7 +20,7 @@
import java.io.Serializable;
-import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 4a76010904..5d0bef4a94 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -28,7 +28,7 @@
import java.util.Map;
import java.util.Map.Entry;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
index 002f63f7bb..076b4798ef 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
@@ -338,7 +338,7 @@ private static InputJobInfo getJobInfo(Configuration conf)
Iterator pathIterator = pathStrings.iterator();
while (pathIterator.hasNext()) {
String pathString = pathIterator.next();
- if (ignoreInvalidPath && org.apache.commons.lang.StringUtils.isBlank(pathString)) {
+ if (ignoreInvalidPath && org.apache.commons.lang3.StringUtils.isBlank(pathString)) {
continue;
}
Path path = new Path(pathString);
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/MultiOutputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/MultiOutputFormat.java
index 8a080cf0a5..f1eed505a8 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/MultiOutputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/MultiOutputFormat.java
@@ -32,7 +32,7 @@
import java.util.Map.Entry;
import java.util.Set;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
index 6dbb0a704c..9d83352df4 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
@@ -19,7 +19,7 @@
package org.apache.hive.hcatalog.mapreduce;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hive.hcatalog.common.HCatConstants;
import org.apache.hive.hcatalog.common.HCatUtil;
diff --git a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
index 994c505d64..422342c947 100644
--- a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
+++ b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
@@ -31,7 +31,7 @@
import java.util.Map.Entry;
import java.util.Properties;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.Date;
import org.apache.hadoop.hive.common.type.HiveChar;
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/jms/MessagingUtils.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/jms/MessagingUtils.java
index 5ab5f513b2..8c8a8ce934 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/jms/MessagingUtils.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/jms/MessagingUtils.java
@@ -19,7 +19,7 @@
package org.apache.hive.hcatalog.messaging.jms;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hive.hcatalog.common.HCatConstants;
import org.apache.hive.hcatalog.messaging.HCatEventMessage;
import org.apache.hive.hcatalog.messaging.MessageFactory;
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
index 6a9a47e85c..28406d38e8 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
@@ -23,7 +23,7 @@
import java.util.List;
import java.util.Properties;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index 1cb35bbd06..b73b6fe524 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -28,7 +28,7 @@
import javax.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
index ed2aef4758..ee60556a80 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
@@ -25,7 +25,7 @@
import java.util.Map;
import com.google.common.collect.Maps;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HcatDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HcatDelegator.java
index e6afd69563..ad447a7cf1 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HcatDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HcatDelegator.java
@@ -27,7 +27,7 @@
import javax.ws.rs.core.Response;
import org.apache.commons.exec.ExecuteException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileStatus;
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Copy.java b/hplsql/src/main/java/org/apache/hive/hplsql/Copy.java
index e562656282..d420e5ca17 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Copy.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Copy.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hive.hplsql.Var;
import org.antlr.v4.runtime.ParserRuleContext;
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.tuple.Pair;
public class Copy {
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java
index 279dd2048f..3554dac6ed 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java
@@ -30,7 +30,7 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.antlr.v4.runtime.ParserRuleContext;
import org.apache.hive.hplsql.*;
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java
index 373dd70da7..521c478af7 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java
@@ -24,7 +24,7 @@
import java.util.Date;
import java.util.TimeZone;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hive.hplsql.*;
public class FunctionDatetime extends Function {
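
Note on the commons-lang hunks above: they swap org.apache.commons.lang (2.x) imports for org.apache.commons.lang3. For the utilities touched in this patch (StringUtils and StringEscapeUtils), the lang3 classes keep the same static method signatures, so the import line is normally the only change a caller needs; lang3's StringEscapeUtils is deprecated in newer releases in favor of commons-text, but it is still available and behaves the same for these calls. A minimal sketch, not part of the patch (the class name is hypothetical):

    import org.apache.commons.lang3.StringEscapeUtils;
    import org.apache.commons.lang3.StringUtils;

    public class Lang3MigrationSketch {
      public static void main(String[] args) {
        // Same results as the commons-lang 2.x methods used before the import swap.
        System.out.println(StringUtils.isBlank("   "));                      // true
        System.out.println(StringUtils.join(new String[] {"a", "b"}, ","));  // a,b
        System.out.println(StringEscapeUtils.escapeJava("tab\there"));       // tab\there
      }
    }
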
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 698ded25f3..26c4937a28 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -1375,7 +1375,7 @@ public int deleteScheduledExecutions(int maxRetainSecs) {
@Override
- public int markScheduledExecutionsTimedOut(int timeoutSecs) throws InvalidOperationException{
+ public int markScheduledExecutionsTimedOut(int timeoutSecs) throws InvalidOperationException, MetaException {
return objectStore.markScheduledExecutionsTimedOut(timeoutSecs);
}
}
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
index e997fa65cf..2addf92c22 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -342,7 +342,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java
new file mode 100644
index 0000000000..51bb78733a
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java
@@ -0,0 +1,1434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.thrift.TException;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * TestMetaStoreMultipleEncryptionZones.
+ */
+public class TestMetaStoreMultipleEncryptionZones {
+ private static HiveMetaStoreClient client;
+ private static HiveConf hiveConf;
+ private static Configuration conf;
+ private static Warehouse warehouse;
+ private static FileSystem warehouseFs;
+ private static MiniDFSCluster miniDFSCluster;
+ private static String cmroot;
+ private static FileSystem fs;
+ private static HadoopShims.HdfsEncryptionShim shimCm;
+ private static String cmrootEncrypted;
+ private static String jksFile = System.getProperty("java.io.tmpdir") + "/test.jks";
+ private static String cmrootFallBack;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ //Create secure cluster
+ conf = new Configuration();
+ conf.set("hadoop.security.key.provider.path", "jceks://file" + jksFile);
+ miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
+ DFSTestUtil.createKey("test_key_cm", miniDFSCluster, conf);
+ DFSTestUtil.createKey("test_key_db", miniDFSCluster, conf);
+ hiveConf = new HiveConf(TestReplChangeManager.class);
+ hiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true);
+ hiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60);
+ hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
+ "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort()
+ + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal);
+
+ cmroot = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmroot";
+ cmrootFallBack = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootFallback";
+ cmrootEncrypted = "cmrootEncrypted";
+ hiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmroot);
+ hiveConf.set(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted);
+ hiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack);
+ initReplChangeManager();
+ //Create cm in encrypted zone
+ shimCm = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
+
+ try {
+ client = new HiveMetaStoreClient(hiveConf);
+ } catch (Throwable e) {
+ System.err.println("Unable to open the metastore");
+ System.err.println(StringUtils.stringifyException(e));
+ throw e;
+ }
+ }
+
+ private static void initReplChangeManager() throws Exception {
+ warehouse = new Warehouse(hiveConf);
+ warehouseFs = warehouse.getWhRoot().getFileSystem(hiveConf);
+ fs = new Path(cmroot).getFileSystem(hiveConf);
+ fs.mkdirs(warehouse.getWhRoot());
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ try {
+ miniDFSCluster.shutdown();
+ client.close();
+ } catch (Throwable e) {
+ System.err.println("Unable to close metastore");
+ System.err.println(StringUtils.stringifyException(e));
+ throw e;
+ }
+ }
+
+ @Test
+ public void dropTableWithDifferentEncryptionZonesDifferentKey() throws Throwable {
+ String dbName1 = "encrdbdiffkey1";
+ String dbName2 = "encrdbdiffkey2";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName1);
+ silentDropDatabase(dbName2);
+ new DatabaseBuilder()
+ .setName(dbName1)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ new DatabaseBuilder()
+ .setName(dbName2)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ Path dirDb1 = new Path(warehouse.getWhRoot(), dbName1 +".db");
+ warehouseFs.delete(dirDb1, true);
+ warehouseFs.mkdirs(dirDb1);
+ shimCm.createEncryptionZone(dirDb1, "test_key_db");
+ Path dirTbl1 = new Path(dirDb1, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirDb2 = new Path(warehouse.getWhRoot(), dbName2 +".db");
+ warehouseFs.delete(dirDb2, true);
+ warehouseFs.mkdirs(dirDb2);
+ shimCm.createEncryptionZone(dirDb2, "test_key_cm");
+ Path dirTbl2 = new Path(dirDb2, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ new TableBuilder()
+ .setDbName(dbName1)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName1, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName2)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName1, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName1, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName2, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName2, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+
+
+ }
+
+ @Test
+ public void dropTableWithTableAtEncryptionZoneRoot() throws Throwable {
+ String dbName = "encrdbroot";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ shimCm.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ shimCm.createEncryptionZone(dirTbl2, "test_key_cm");
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ assertTrue(warehouseFs.exists(new Path(dirTbl1, cmrootEncrypted)));
+ assertTrue(warehouseFs.exists(new Path(dirTbl2, cmrootEncrypted)));
+ }
+
+ @Test
+ public void dropTableWithDifferentEncryptionZonesSameKey() throws Throwable {
+ String dbName1 = "encrdbsamekey1";
+ String dbName2 = "encrdbsamekey2";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName1);
+ silentDropDatabase(dbName2);
+ new DatabaseBuilder()
+ .setName(dbName1)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ new DatabaseBuilder()
+ .setName(dbName2)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ Path dirDb1 = new Path(warehouse.getWhRoot(), dbName1 +".db");
+ warehouseFs.mkdirs(dirDb1);
+ shimCm.createEncryptionZone(dirDb1, "test_key_db");
+ Path dirTbl1 = new Path(dirDb1, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirDb2 = new Path(warehouse.getWhRoot(), dbName2 +".db");
+ warehouseFs.mkdirs(dirDb2);
+ shimCm.createEncryptionZone(dirDb2, "test_key_db");
+ Path dirTbl2 = new Path(dirDb2, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ new TableBuilder()
+ .setDbName(dbName1)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName1, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName2)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName1, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName1, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName2, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName2, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+
+ }
+
+ @Test
+ public void dropTableWithSameEncryptionZones() throws Throwable {
+ String dbName = "encrdb3";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+ silentDropDatabase(dbName);
+
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.delete(dirDb, true);
+ warehouseFs.mkdirs(dirDb);
+ shimCm.createEncryptionZone(dirDb, "test_key_db");
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void dropTableWithoutEncryptionZonesForCm() throws Throwable {
+ String dbName = "simpdb1";
+ String tblName = "simptbl";
+ String typeName = "Person";
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName);
+ Assert.assertNotNull(tbl);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName);
+ } catch (Exception e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void dropExternalTableWithSameEncryptionZonesForCm() throws Throwable {
+ String dbName = "encrdb4";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.delete(dirDb, true);
+ warehouseFs.mkdirs(dirDb);
+ shimCm.createEncryptionZone(dirDb, "test_key_db");
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void dropExternalTableWithDifferentEncryptionZones() throws Throwable {
+ String dbName = "encrdb5";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ shimCm.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ shimCm.createEncryptionZone(dirTbl2, "test_key_db");
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void dropExternalTableWithDifferentEncryptionZonesDifferentKey() throws Throwable {
+ String dbName = "encrdb6";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ shimCm.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ shimCm.createEncryptionZone(dirTbl2, "test_key_cm");
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName1);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName2);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void dropExternalTableWithoutEncryptionZonesForCm() throws Throwable {
+ String dbName = "simpdb2";
+ String tblName = "simptbl";
+ String typeName = "Person";
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addTableParam("EXTERNAL", "true")
+ .addTableParam("external.table.purge", "true")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName);
+ Assert.assertNotNull(tbl);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ boolean exceptionThrown = false;
+ try {
+ client.dropTable(dbName, tblName);
+ } catch (Exception e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue(exceptionThrown);
+ }
+
+ @Test
+ public void truncateTableWithDifferentEncryptionZones() throws Throwable {
+ String dbName1 = "encrdbtrunc1";
+ String dbName2 = "encrdbtrunc2";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName1);
+ silentDropDatabase(dbName2);
+ new DatabaseBuilder()
+ .setName(dbName1)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ new DatabaseBuilder()
+ .setName(dbName2)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ Path dirDb1 = new Path(warehouse.getWhRoot(), dbName1 +".db");
+ warehouseFs.delete(dirDb1, true);
+ warehouseFs.mkdirs(dirDb1);
+ shimCm.createEncryptionZone(dirDb1, "test_key_db");
+ Path dirTbl1 = new Path(dirDb1, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirDb2 = new Path(warehouse.getWhRoot(), dbName2 +".db");
+ warehouseFs.delete(dirDb2, true);
+ warehouseFs.mkdirs(dirDb2);
+ shimCm.createEncryptionZone(dirDb2, "test_key_db");
+ Path dirTbl2 = new Path(dirDb2, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ new TableBuilder()
+ .setDbName(dbName1)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName1, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName2)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ boolean exceptionThrown = false;
+ try {
+ client.truncateTable(dbName1, tblName1, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ assertNotNull(client.getTable(dbName1, tblName1));
+ exceptionThrown = false;
+ try {
+ client.truncateTable(dbName2, tblName2, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ assertNotNull(client.getTable(dbName2, tblName2));
+ }
+
+ @Test
+ public void truncateTableWithDifferentEncryptionZonesDifferentKey() throws Throwable {
+ String dbName1 = "encrdb1";
+ String dbName2 = "encrdb2";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+
+ silentDropDatabase(dbName1);
+ silentDropDatabase(dbName2);
+ new DatabaseBuilder()
+ .setName(dbName1)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ new DatabaseBuilder()
+ .setName(dbName2)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ Path dirDb1 = new Path(warehouse.getWhRoot(), dbName1 +".db");
+ warehouseFs.mkdirs(dirDb1);
+ shimCm.createEncryptionZone(dirDb1, "test_key_db");
+ Path dirTbl1 = new Path(dirDb1, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirDb2 = new Path(warehouse.getWhRoot(), dbName2 +".db");
+ warehouseFs.mkdirs(dirDb2);
+ shimCm.createEncryptionZone(dirDb2, "test_key_db");
+ Path dirTbl2 = new Path(dirDb2, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ new TableBuilder()
+ .setDbName(dbName1)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName1, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName2)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ boolean exceptionThrown = false;
+ try {
+ client.truncateTable(dbName1, tblName1, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ assertNotNull(client.getTable(dbName1, tblName1));
+ exceptionThrown = false;
+ try {
+ client.truncateTable(dbName2, tblName2, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ assertNotNull(client.getTable(dbName2, tblName2));
+ }
+
+ @Test
+ public void truncateTableWithSameEncryptionZones() throws Throwable {
+ String dbName = "encrdb9";
+ String tblName1 = "encrtbl1";
+ String tblName2 = "encrtbl2";
+ String typeName = "Person";
+ client.dropTable(dbName, tblName1);
+ client.dropTable(dbName, tblName2);
+ silentDropDatabase(dbName);
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName1)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl = client.getTable(dbName, tblName1);
+ Assert.assertNotNull(tbl);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName2)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.delete(dirDb, true);
+ warehouseFs.mkdirs(dirDb);
+ shimCm.createEncryptionZone(dirDb, "test_key_db");
+ Path dirTbl1 = new Path(dirDb, tblName1);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ Path dirTbl2 = new Path(dirDb, tblName2);
+ warehouseFs.mkdirs(dirTbl2);
+ Path part12 = new Path(dirTbl2, "part1");
+ createFile(part12, "testClearer12");
+
+ boolean exceptionThrown = false;
+ try {
+ client.truncateTable(dbName, tblName1, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName1);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+
+ try {
+ client.truncateTable(dbName, tblName2, null);
+ } catch (MetaException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part12));
+ try {
+ client.getTable(dbName, tblName2);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ }
+
+ @Test
+ public void truncateTableWithoutEncryptionZonesForCm() throws Throwable {
+ String dbName = "simpdb3";
+ String tblName = "simptbl";
+ String typeName = "Person";
+ client.dropTable(dbName, tblName);
+ silentDropDatabase(dbName);
+
+ new DatabaseBuilder()
+ .setName(dbName)
+ .addParam("repl.source.for", "1, 2, 3")
+ .create(client, hiveConf);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName)
+ .setCols(typ1.getFields())
+ .setNumBuckets(1)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+ .create(client, hiveConf);
+
+ Table tbl2 = client.getTable(dbName, tblName);
+ Assert.assertNotNull(tbl2);
+
+ Path dirDb = new Path(warehouse.getWhRoot(), dbName +".db");
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, tblName);
+ warehouseFs.mkdirs(dirTbl1);
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ boolean exceptionThrown = false;
+ try {
+ client.truncateTable(dbName, tblName, null);
+ } catch (Exception e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ assertFalse(warehouseFs.exists(part11));
+ try {
+ client.getTable(dbName, tblName);
+ } catch (NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ }
+
+ @Test
+ public void recycleFailureWithDifferentEncryptionZonesForCm() throws Throwable {
+ Path dirDb = new Path(warehouse.getWhRoot(), "db2");
+ warehouseFs.delete(dirDb, true);
+ warehouseFs.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, "tbl1");
+ warehouseFs.mkdirs(dirTbl1);
+ shimCm.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ boolean exceptionThrown = false;
+ try {
+ ReplChangeManager.getInstance(hiveConf).recycle(dirTbl1, RecycleType.MOVE, false);
+ } catch (RemoteException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+ }
+
+ @Test
+ public void testClearerEncrypted() throws Exception {
+ HiveConf hiveConfCmClearer = new HiveConf(TestReplChangeManager.class);
+ hiveConfCmClearer.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true);
+ hiveConfCmClearer.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60);
+ hiveConfCmClearer.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
+ "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort()
+ + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal);
+
+ String cmrootCmClearer = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootClearer";
+ hiveConfCmClearer.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootCmClearer);
+ Warehouse warehouseCmClearer = new Warehouse(hiveConfCmClearer);
+ FileSystem cmfs = new Path(cmrootCmClearer).getFileSystem(hiveConfCmClearer);
+ cmfs.mkdirs(warehouseCmClearer.getWhRoot());
+
+ HadoopShims.HdfsEncryptionShim shimCmEncrypted = ShimLoader.getHadoopShims().createHdfsEncryptionShim(cmfs, conf);
+
+ FileSystem fsWarehouse = warehouseCmClearer.getWhRoot().getFileSystem(hiveConfCmClearer);
+ long now = System.currentTimeMillis();
+ Path dirDb = new Path(warehouseCmClearer.getWhRoot(), "db1");
+ fsWarehouse.delete(dirDb, true);
+ fsWarehouse.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, "tbl1");
+ fsWarehouse.mkdirs(dirTbl1);
+ shimCmEncrypted.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+ String fileChksum11 = ReplChangeManager.checksumFor(part11, fsWarehouse);
+ Path part12 = new Path(dirTbl1, "part2");
+ createFile(part12, "testClearer12");
+ String fileChksum12 = ReplChangeManager.checksumFor(part12, fsWarehouse);
+ Path dirTbl2 = new Path(dirDb, "tbl2");
+ fsWarehouse.mkdirs(dirTbl2);
+ shimCmEncrypted.createEncryptionZone(dirTbl2, "test_key_db");
+ Path part21 = new Path(dirTbl2, "part1");
+ createFile(part21, "testClearer21");
+ String fileChksum21 = ReplChangeManager.checksumFor(part21, fsWarehouse);
+ Path part22 = new Path(dirTbl2, "part2");
+ createFile(part22, "testClearer22");
+ String fileChksum22 = ReplChangeManager.checksumFor(part22, fsWarehouse);
+ Path dirTbl3 = new Path(dirDb, "tbl3");
+ fsWarehouse.mkdirs(dirTbl3);
+ shimCmEncrypted.createEncryptionZone(dirTbl3, "test_key_cm");
+ Path part31 = new Path(dirTbl3, "part1");
+ createFile(part31, "testClearer31");
+ String fileChksum31 = ReplChangeManager.checksumFor(part31, fsWarehouse);
+ Path part32 = new Path(dirTbl3, "part2");
+ createFile(part32, "testClearer32");
+ String fileChksum32 = ReplChangeManager.checksumFor(part32, fsWarehouse);
+
+ ReplChangeManager.getInstance(hiveConfCmClearer).recycle(dirTbl1, RecycleType.MOVE, false);
+ ReplChangeManager.getInstance(hiveConfCmClearer).recycle(dirTbl2, RecycleType.MOVE, false);
+ ReplChangeManager.getInstance(hiveConfCmClearer).recycle(dirTbl3, RecycleType.MOVE, true);
+
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part11.getName(), fileChksum11,
+ ReplChangeManager.getCmRoot(part11).toString())));
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part12.getName(), fileChksum12,
+ ReplChangeManager.getCmRoot(part12).toString())));
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part21.getName(), fileChksum21,
+ ReplChangeManager.getCmRoot(part21).toString())));
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part22.getName(), fileChksum22,
+ ReplChangeManager.getCmRoot(part22).toString())));
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part31.getName(), fileChksum31,
+ ReplChangeManager.getCmRoot(part31).toString())));
+ assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part32.getName(), fileChksum32,
+ ReplChangeManager.getCmRoot(part32).toString())));
+
+ fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfCmClearer, part11.getName(), fileChksum11,
+ ReplChangeManager.getCmRoot(part11).toString()),
+ now - 86400*1000*2, now - 86400*1000*2);
+ fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfCmClearer, part21.getName(), fileChksum21,
+ ReplChangeManager.getCmRoot(part21).toString()),
+ now - 86400*1000*2, now - 86400*1000*2);
+ fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfCmClearer, part31.getName(), fileChksum31,
+ ReplChangeManager.getCmRoot(part31).toString()),
+ now - 86400*1000*2, now - 86400*1000*2);
+ fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfCmClearer, part32.getName(), fileChksum32,
+ ReplChangeManager.getCmRoot(part32).toString()),
+ now - 86400*1000*2, now - 86400*1000*2);
+
+ ReplChangeManager.scheduleCMClearer(hiveConfCmClearer);
+
+ long start = System.currentTimeMillis();
+ long end;
+ boolean cleared = false;
+ do {
+ Thread.sleep(200);
+ end = System.currentTimeMillis();
+ if (end - start > 5000) {
+ Assert.fail("timeout, cmroot has not been cleared");
+ }
+ if (!fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part11.getName(), fileChksum11,
+ ReplChangeManager.getCmRoot(part11).toString())) &&
+ fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part12.getName(), fileChksum12,
+ ReplChangeManager.getCmRoot(part12).toString())) &&
+ !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part21.getName(), fileChksum21,
+ ReplChangeManager.getCmRoot(part21).toString())) &&
+ fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part22.getName(), fileChksum22,
+ ReplChangeManager.getCmRoot(part22).toString())) &&
+ !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part31.getName(), fileChksum31,
+ ReplChangeManager.getCmRoot(part31).toString())) &&
+ !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfCmClearer, part32.getName(), fileChksum32,
+ ReplChangeManager.getCmRoot(part32).toString()))) {
+ cleared = true;
+ }
+ } while (!cleared);
+ }
+
+ @Test
+ public void testCmrootEncrypted() throws Exception {
+ HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class);
+ encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true);
+ encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60);
+ encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
+ "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort()
+ + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal);
+
+ String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmroot";
+ encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted);
+ encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack);
+
+ //Create cm in encrypted zone
+ HadoopShims.HdfsEncryptionShim shimCmEncrypted = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
+ shimCmEncrypted.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db");
+ ReplChangeManager.resetReplChangeManagerInstance();
+ Warehouse warehouseEncrypted = new Warehouse(encryptedHiveConf);
+ FileSystem warehouseFsEncrypted = warehouseEncrypted.getWhRoot().getFileSystem(encryptedHiveConf);
+ FileSystem fsCmEncrypted = new Path(cmrootdirEncrypted).getFileSystem(encryptedHiveConf);
+ fsCmEncrypted.mkdirs(warehouseEncrypted.getWhRoot());
+
+ Path dirDb = new Path(warehouseEncrypted.getWhRoot(), "db3");
+ warehouseFsEncrypted.delete(dirDb, true);
+ warehouseFsEncrypted.mkdirs(dirDb);
+ Path dirTbl1 = new Path(dirDb, "tbl1");
+ warehouseFsEncrypted.mkdirs(dirTbl1);
+ shimCmEncrypted.createEncryptionZone(dirTbl1, "test_key_db");
+ Path part11 = new Path(dirTbl1, "part1");
+ createFile(part11, "testClearer11");
+
+ boolean exceptionThrown = false;
+ try {
+ ReplChangeManager.getInstance(encryptedHiveConf).recycle(dirTbl1, RecycleType.MOVE, false);
+ } catch (RemoteException e) {
+ exceptionThrown = true;
+ assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
+ }
+ assertFalse(exceptionThrown);
+
+ Path dirDbUnEncrypted = new Path(warehouseEncrypted.getWhRoot(), "db3en");
+ warehouseFsEncrypted.delete(dirDbUnEncrypted, true);
+ warehouseFsEncrypted.mkdirs(dirDbUnEncrypted);
+ Path dirTblun1 = new Path(dirDbUnEncrypted, "tbl1");
+ warehouseFsEncrypted.mkdirs(dirTblun1);
+ Path partun11 = new Path(dirTblun1, "part1");
+ createFile(partun11, "testClearer11");
+
+ exceptionThrown = false;
+ try {
+ ReplChangeManager.getInstance(encryptedHiveConf).recycle(dirDbUnEncrypted, RecycleType.MOVE, false);
+ } catch (IOException e) {
+ exceptionThrown = true;
+ }
+ assertFalse(exceptionThrown);
+ ReplChangeManager.resetReplChangeManagerInstance();
+ initReplChangeManager();
+ }
+
+
+ private void createFile(Path path, String content) throws IOException {
+ FSDataOutputStream output = path.getFileSystem(hiveConf).create(path);
+ output.writeChars(content);
+ output.close();
+ }
+
+ private void silentDropDatabase(String dbName) throws TException {
+ try {
+ for (String tableName : client.getTables(dbName, "*")) {
+ client.dropTable(dbName, tableName);
+ }
+ client.dropDatabase(dbName);
+ } catch (NoSuchObjectException|InvalidOperationException e) {
+ // NOP
+ }
+ }
+}
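
The tests in this new file all follow the same shape: place a database or table directory inside an HDFS encryption zone, drop or truncate it (or call recycle directly), then verify that the data was moved into the change-management root instead of failing with a "can't be moved from encryption zone" error. A condensed sketch of that flow, assembled only from calls that appear in this patch; the helper class, method, path, and key names are illustrative, and with REPLCMENCRYPTEDDIR configured the CM copy appears to land inside the file's own encryption zone (see the cmrootEncrypted assertions above):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.ReplChangeManager;
    import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
    import org.apache.hadoop.hive.shims.HadoopShims;
    import org.apache.hadoop.hive.shims.ShimLoader;

    class CmRecycleSketch {
      // Returns true if the recycled file is found under the CM root resolved for its path.
      static boolean recycleAndVerify(HiveConf conf, FileSystem fs, Path tableDir) throws Exception {
        HadoopShims.HdfsEncryptionShim shim =
            ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
        shim.createEncryptionZone(tableDir, "test_key_db");   // key name taken from the tests above

        // Mirrors createFile() in the tests: write a small file under the table directory.
        Path part = new Path(tableDir, "part1");
        try (FSDataOutputStream out = fs.create(part)) {
          out.writeChars("sample");
        }
        String checksum = ReplChangeManager.checksumFor(part, fs);

        // Move the directory's contents into the change-management root.
        ReplChangeManager.getInstance(conf).recycle(tableDir, RecycleType.MOVE, false);

        Path cmCopy = ReplChangeManager.getCMPath(conf, part.getName(), checksum,
            ReplChangeManager.getCmRoot(part).toString());
        return fs.exists(cmCopy);
      }
    }
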
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index 5ab4f91486..d3891cf9bb 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -271,6 +271,7 @@ public void testClearer() throws Exception {
FileSystem fs = warehouse.getWhRoot().getFileSystem(hiveConf);
long now = System.currentTimeMillis();
Path dirDb = new Path(warehouse.getWhRoot(), "db3");
+ fs.delete(dirDb, true);
fs.mkdirs(dirDb);
Path dirTbl1 = new Path(dirDb, "tbl1");
fs.mkdirs(dirTbl1);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
index 562b2c9763..19d38d2f04 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
@@ -48,6 +48,8 @@ public void setUp() throws Exception {
MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT, true);
MetastoreConf.setBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED, true);
MetastoreConf.setBoolVar(conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false);
+ MetastoreConf.setBoolVar(conf, ConfVars.REPLCMENABLED, true);
+ MetastoreConf.setVar(conf, ConfVars.REPLCMDIR, "cmroot");
MetaStoreTestUtils.setConfForStandloneMode(conf);
hmsHandler = new HiveMetaStore.HMSHandler("testCachedStore", conf, true);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index 286842798d..056cd27496 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -699,7 +699,8 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception {
setupTez(confForTez);
int[][] values = {{1,2},{2,4},{5,6},{6,8},{9,10}};
runStatementOnDriver("delete from " + Table.ACIDTBL, confForTez);
- runStatementOnDriver("insert into " + Table.ACIDTBL + TestTxnCommands2.makeValuesClause(values));//make sure both buckets are not empty
+ //make sure both buckets are not empty
+ runStatementOnDriver("insert into " + Table.ACIDTBL + TestTxnCommands2.makeValuesClause(values), confForTez);
runStatementOnDriver("drop table if exists T", confForTez);
/*
With bucketed target table Union All is not removed
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
index f6035fa01e..96aeb0f12c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
@@ -63,5 +63,6 @@ public void testAlterTableOwner() throws HiveException, CommandProcessorExceptio
table = Hive.get(conf).getTable("t1");
assertEquals(PrincipalType.ROLE, table.getOwnerType());
assertEquals("r1", table.getOwner());
+
}
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 0d64780f96..39d876802a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -60,7 +60,7 @@
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.ddl.DDLTask;
-import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc;
import org.apache.hadoop.hive.ql.exec.MoveTask;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index 897a4014e9..43effeb64e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hive.ql.parse;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
index c9bf4b59bd..82d3db5910 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
@@ -21,7 +21,7 @@
import java.io.PrintStream;
import java.util.UUID;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 7afef0fce6..4c01311117 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.txn.compactor;
import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -31,19 +32,25 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hive.streaming.HiveStreamingConnection;
import org.apache.hive.streaming.StreamingConnection;
@@ -55,6 +62,8 @@
import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.executeStatementOnDriver;
import static org.apache.hadoop.hive.ql.txn.compactor.CompactorTestUtil.executeStatementOnDriverAndReturnResults;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
@SuppressWarnings("deprecation")
public class TestCrudCompactorOnTez {
@@ -923,6 +932,44 @@ public void testCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws
executeStatementOnDriver("drop table " + tblName, driver);
}
+ /**
+ * Tests whether the hive.llap.io.etl.skip.format config is handled properly whenever QueryCompactor#runCompactionQueries
+ * is invoked.
+ * @throws Exception
+ */
+ @Test
+ public void testLlapCacheOffDuringCompaction() throws Exception {
+ // Setup
+ QueryCompactor qc = new QueryCompactor() {
+ @Override
+ void runCompaction(HiveConf hiveConf, Table table, Partition partition, StorageDescriptor storageDescriptor,
+ ValidWriteIdList writeIds, CompactionInfo compactionInfo) throws IOException {
+ }
+
+ @Override
+ protected void commitCompaction(String dest, String tmpTableName, HiveConf conf, ValidWriteIdList actualWriteIds,
+ long compactorTxnId) throws IOException, HiveException {
+ }
+ };
+ StorageDescriptor sdMock = mock(StorageDescriptor.class);
+ doAnswer(invocationOnMock -> {
+ return null;
+ }).when(sdMock).getLocation();
+ List<String> emptyQueries = new ArrayList<>();
+ HiveConf hiveConf = new HiveConf();
+ hiveConf.set(ValidTxnList.VALID_TXNS_KEY, "8:9223372036854775807::");
+
+ // Check for default case.
+ qc.runCompactionQueries(hiveConf, null, sdMock, null, null, emptyQueries, emptyQueries, emptyQueries);
+ Assert.assertEquals("all", hiveConf.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT));
+
+ // Check the case where hive.llap.io.etl.skip.format is explicitly set to none, i.e. the cache should always be used.
+ hiveConf.setVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT, "none");
+ qc.runCompactionQueries(hiveConf, null, sdMock, null, null, emptyQueries, emptyQueries, emptyQueries);
+ Assert.assertEquals("none", hiveConf.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT));
+
+ }
+
private class TestDataProvider {
private void createTable(String tblName, boolean isPartitioned, boolean isBucketed) throws Exception {
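A note on the new testLlapCacheOffDuringCompaction above: it only holds if QueryCompactor#runCompactionQueries forces hive.llap.io.etl.skip.format to "all" for the compaction queries unless the user explicitly pinned it to "none". A minimal sketch of that kind of guard, inferred from the assertions in the test rather than copied from the actual QueryCompactor code:

// Sketch only: disable LLAP IO ETL caching for compaction unless the user
// explicitly asked to keep the cache by setting the skip format to "none".
static void maybeDisableLlapCache(HiveConf conf) {
  String skipFormat = conf.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT);
  if (!"none".equalsIgnoreCase(skipFormat)) {
    conf.setVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT, "all");
  }
}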
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 18b5410dc4..04dd0ddad2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -45,7 +45,7 @@
import com.google.common.base.Function;
import com.google.common.collect.Lists;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java
index e0b93f36ba..1f0006ce19 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java
@@ -24,7 +24,7 @@
import java.io.IOException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper;
import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.NestedScriptParser;
import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.PostgresCommandParser;
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 92a0bbe806..7e0a7f2022 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -202,6 +202,8 @@ public static void setUpBeforeClass() throws SQLException, ClassNotFoundExceptio
System.setProperty(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname,
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider");
System.setProperty(ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION.varname, "false");
+ System.setProperty(ConfVars.REPLCMENABLED.varname, "true");
+ System.setProperty(ConfVars.REPLCMDIR.varname, "cmroot");
con = getConnection(defaultDbName + ";create=true");
Statement stmt = con.createStatement();
assertNotNull("Statement is null", stmt);
@@ -2828,6 +2830,8 @@ public void testGetQueryLogForReplCommands() throws Exception {
stmt.execute("set hive.metastore.transactional.event.listeners =" +
" org.apache.hive.hcatalog.listener.DbNotificationListener");
stmt.execute("set hive.metastore.dml.events = true");
+ stmt.execute("set hive.repl.cm.enabled = true");
+ stmt.execute("set hive.repl.cmrootdir = cmroot");
stmt.execute("create database " + primaryDb + " with dbproperties('repl.source.for'='1,2,3')");
stmt.execute("create table " + primaryTblName + " (id int)");
stmt.execute("insert into " + primaryTblName + " values (1), (2)");
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 03a1926440..79beadd6fb 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -58,7 +58,7 @@
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -106,6 +106,7 @@ public static void setupBeforeClass() throws Exception {
HiveConf conf = new HiveConf();
dataFileDir = conf.get("test.data.files").replace('\\', '/').replace("c:", "");
kvDataFilePath = new Path(dataFileDir, "kv1.txt");
+
try {
startMiniHS2(conf);
} catch (Exception e) {
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
index 6972ddef9b..d4d53d4d2d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
@@ -24,6 +24,7 @@
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.log.HushableRandomAccessFileAppender;
import org.apache.hadoop.hive.ql.log.LogDivertAppender;
@@ -132,22 +133,47 @@ private void appendHushableRandomAccessFileAppender(Appender queryAppender) {
}
}
- @Test
- public void testSwitchLogLayout() throws Exception {
+ private void executeWithOperationLog(String query, boolean queryLogEnabled) throws Exception {
// verify whether the sql operation log is generated and fetched correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, query, null);
RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- String queryId = getQueryId(rowSetLog);
- Assert.assertNotNull("Could not find query id, perhaps a logging message changed", queryId);
+ FetchType.LOG);
+ String queryId = "";
+ boolean expectedStopped = true;
+ if (queryLogEnabled) {
+ queryId = getQueryId(rowSetLog);
+ expectedStopped = false;
+ Assert.assertNotNull("Could not find query id, perhaps a logging message changed", queryId);
+ } else {
+ Assert.assertEquals("Operation log is generated even if query logging is disabled", rowSetLog.numRows(), 0);
+ Assert.assertNull("Query id present even if logging is disabled.", getQueryId(rowSetLog));
+ }
- checkAppenderState("before operation close ", LogDivertAppender.QUERY_ROUTING_APPENDER, queryId, false);
- checkAppenderState("before operation close ", LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId, false);
+ checkAppenderState("before operation close ", LogDivertAppender.QUERY_ROUTING_APPENDER, queryId, expectedStopped);
+ checkAppenderState("before operation close ", LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId, expectedStopped);
client.closeOperation(operationHandle);
checkAppenderState("after operation close ", LogDivertAppender.QUERY_ROUTING_APPENDER, queryId, true);
checkAppenderState("after operation close ", LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId, true);
}
+ @Test
+ public void testSwitchLogLayout() throws Exception {
+ executeWithOperationLog(sqlCntStar, true);
+ }
+
+ @Test
+ public void testQueryLogDisabled() throws Exception {
+ OperationHandle operationHandle = client.executeStatement(sessionHandle,
+ "set hive.server2.logging.operation.enabled=false", null);
+ client.closeOperation(operationHandle);
+
+ executeWithOperationLog(sqlCntStar, false);
+
+ operationHandle = client.executeStatement(sessionHandle,
+ "set hive.server2.logging.operation.enabled=true", null);
+ client.closeOperation(operationHandle);
+ }
+
@Test
/**
* Test to make sure that appending log event to HushableRandomAccessFileAppender even after
diff --git a/itests/pom.xml b/itests/pom.xml
index 6d8bf97d72..3dc95bfb28 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -177,6 +177,17 @@
hive-serde
${project.version}
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
org.apache.hive
hive-exec
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index 825fd3bcf9..b0373d5622 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -110,6 +110,11 @@
hive-serde
test
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
org.apache.hive
hive-exec
diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
index 05692c7a6c..6da72733e7 100644
--- a/itests/qtest-druid/pom.xml
+++ b/itests/qtest-druid/pom.xml
@@ -44,7 +44,7 @@
16.0.1
4.1.0
2.0.0
- 1.7.25
+ 1.7.30
diff --git a/itests/qtest-kudu/pom.xml b/itests/qtest-kudu/pom.xml
index a7e60e160a..132d22c5d5 100644
--- a/itests/qtest-kudu/pom.xml
+++ b/itests/qtest-kudu/pom.xml
@@ -99,6 +99,11 @@
hive-serde
test
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
org.apache.hive
hive-exec
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index ebd7b63e6c..b6bbeef9e8 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -160,6 +160,11 @@
hive-serde
test
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
org.apache.hive
hive-exec
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index be8e377891..f6fce77835 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -39,7 +39,7 @@
false
-mkdir -p
- 1.7.25
+ 1.7.30
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 580ef95335..99ca9867b1 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -265,6 +265,7 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
vector_decimal64_div_decimal64column.q,\
vector_decimal64_mul_decimal64scalar.q,\
vector_decimal64_mul_decimal64column.q,\
+ vector_decimal64_mul_intcolumn.q,\
vector_decimal_1.q,\
vector_decimal_10_0.q,\
vector_decimal_2.q,\
@@ -515,6 +516,7 @@ minillaplocal.query.files=\
compare_double_bigint_2.q,\
constprog_dpp.q,\
constant_prop_when.q,\
+ constant_prop_join_rs.q,\
constraints_alter.q,\
constraints_optimization.q,\
current_date_timestamp.q,\
@@ -539,6 +541,7 @@ minillaplocal.query.files=\
dynpart_sort_opt_vectorization.q,\
dynpart_sort_optimization.q,\
dynpart_sort_optimization_acid.q,\
+ dynpart_sort_opt_bucketing.q,\
enforce_constraint_notnull.q,\
escape1.q,\
escape2.q,\
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
index 0997681cbf..712af828fe 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
@@ -32,7 +32,7 @@
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.QTestSystemProperties;
import org.apache.hadoop.hive.ql.QTestMiniClusters.FsType;
import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
index cd6bc34cc1..5b08f8b894 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
@@ -33,7 +33,7 @@
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConfUtil;
import org.apache.hadoop.hive.ql.QTestProcessExecResult;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index 75015808bc..8baf1464b9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -21,7 +21,7 @@
import java.io.IOException;
import java.util.Arrays;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestResultProcessor.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestResultProcessor.java
index 18b49fa7cb..204a9695b8 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestResultProcessor.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestResultProcessor.java
@@ -193,7 +193,7 @@ private static QTestProcessExecResult executeCmd(Collection<String> args, String
public static QTestProcessExecResult executeCmd(String[] args, String outFile, String errFile)
throws Exception {
- System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
+ System.out.println("Running: " + org.apache.commons.lang3.StringUtils.join(args, ' '));
PrintStream out = outFile == null ? SessionState.getConsole().getChildOutStream()
: new PrintStream(new FileOutputStream(outFile), true, "UTF-8");
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestSyntaxUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestSyntaxUtil.java
index 3412cae427..c2f7acda08 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestSyntaxUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestSyntaxUtil.java
@@ -22,7 +22,7 @@
import java.sql.SQLException;
import java.util.List;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 953253f16a..c5624f2723 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -41,7 +41,7 @@
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -76,6 +76,7 @@
import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.processors.HiveCommand;
+import org.apache.hadoop.hive.ql.qoption.QTestAuthorizerHandler;
import org.apache.hadoop.hive.ql.qoption.QTestOptionDispatcher;
import org.apache.hadoop.hive.ql.qoption.QTestReplaceHandler;
import org.apache.hadoop.hive.ql.qoption.QTestSysDbHandler;
@@ -211,6 +212,7 @@ public QTestUtil(QTestArguments testArgs) throws Exception {
testFiles = datasetHandler.getDataDir(conf);
conf.set("test.data.dir", datasetHandler.getDataDir(conf));
conf.setVar(ConfVars.HIVE_QUERY_RESULTS_CACHE_DIRECTORY, "/tmp/hive/_resultscache_" + ProcessUtils.getPid());
+ dispatcher.register("authorizer", new QTestAuthorizerHandler());
dispatcher.register("dataset", datasetHandler);
dispatcher.register("replace", replaceHandler);
dispatcher.register("sysdb", new QTestSysDbHandler());
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
index 138f4ae0d4..238db5b825 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
@@ -22,7 +22,7 @@
import java.util.Map;
import java.util.LinkedHashMap;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.session.SessionState;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckTableAccessHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckTableAccessHook.java
index 4e996df9a5..bcefe89b50 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckTableAccessHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckTableAccessHook.java
@@ -21,7 +21,7 @@
import java.util.Map;
import java.util.LinkedHashMap;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.parse.TableAccessInfo;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifySessionStateStackTracesHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifySessionStateStackTracesHook.java
index aa75c481e8..6bb6a3c65b 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifySessionStateStackTracesHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifySessionStateStackTracesHook.java
@@ -20,7 +20,7 @@
import java.util.List;
import java.util.Map.Entry;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestAuthorizerHandler.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestAuthorizerHandler.java
new file mode 100644
index 0000000000..c74f72c0d6
--- /dev/null
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestAuthorizerHandler.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.qoption;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
+
+/**
+ * QTest authorizer option
+ *
+ * Enables authorization for the qtest.
+ *
+ * Example:
+ * --! qt:authorizer
+ */
+public class QTestAuthorizerHandler implements QTestOptionHandler {
+ private boolean enabled;
+
+ @Override
+ public void processArguments(String arguments) {
+ enabled = true;
+ }
+
+ @Override
+ public void beforeTest(QTestUtil qt) throws Exception {
+ if (enabled) {
+ qt.getConf().set("hive.test.authz.sstd.hs2.mode", "true");
+ qt.getConf().set("hive.security.authorization.manager",
+ "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest");
+ qt.getConf().set("hive.security.authenticator.manager",
+ "org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator");
+ qt.getConf().set("hive.security.authorization.enabled", "true");
+ }
+ }
+
+ @Override
+ public void afterTest(QTestUtil qt) throws Exception {
+ enabled = false;
+ }
+
+}
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java b/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
index 34e7113683..aeb36f8f27 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
@@ -271,7 +271,7 @@ private QTestProcessExecResult executeDiff() throws IOException, InterruptedExce
diffCommandArgs.add(getQuotedString(expectedOutputFile));
diffCommandArgs.add(getQuotedString(outputFile));
- System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(diffCommandArgs,
+ System.out.println("Running: " + org.apache.commons.lang3.StringUtils.join(diffCommandArgs,
' '));
Process executor = Runtime.getRuntime().exec(diffCommandArgs.toArray(
new String[diffCommandArgs.size()]));
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/DBRecordWritable.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/DBRecordWritable.java
index b062aa3ed7..77abae9ceb 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/DBRecordWritable.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/DBRecordWritable.java
@@ -20,9 +20,11 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.sql.Types;
import java.util.Arrays;
import org.apache.hadoop.io.Writable;
@@ -59,8 +61,13 @@ public void write(PreparedStatement statement) throws SQLException {
if (columnValues == null) {
throw new SQLException("No data available to be written");
}
+ ParameterMetaData parameterMetaData = statement.getParameterMetaData();
for (int i = 0; i < columnValues.length; i++) {
- statement.setObject(i + 1, columnValues[i]);
+ Object value = columnValues[i];
+ if (parameterMetaData.getParameterType(i + 1) == Types.CHAR && value instanceof Boolean) {
+ value = ((Boolean) value).booleanValue() ? "1" : "0";
+ }
+ statement.setObject(i + 1, value);
}
}
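The write() change above maps Boolean values onto CHAR parameters as "1"/"0" so that targets without a native BOOLEAN type still accept them. The same rule, factored into a standalone helper for illustration (the method name is hypothetical, not part of the patch):

// Sketch only: convert a Boolean into the textual flag a CHAR column expects.
static Object toJdbcValue(Object value, int sqlType) {
  if (sqlType == java.sql.Types.CHAR && value instanceof Boolean) {
    return ((Boolean) value) ? "1" : "0";  // CHAR columns get a textual flag
  }
  return value;                            // all other values pass through unchanged
}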
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
index dbc8453d31..cd7cd4f7b2 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
@@ -30,6 +30,7 @@
import java.sql.ResultSet;
import java.sql.SQLDataException;
import java.sql.SQLException;
+import java.sql.Types;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -109,7 +110,12 @@ public boolean hasNext() {
value = rs.getBigDecimal(i + 1);
break;
case BOOLEAN:
- value = rs.getBoolean(i + 1);
+ boolean b = rs.getBoolean(i + 1);
+ if (b && rs.getMetaData().getColumnType(i + 1) == Types.CHAR) {
+ // Also accept Y/N in the case of CHAR(1): DataNucleus stores booleans in CHAR(1) fields on Derby.
+ b = !"N".equals(rs.getString(i + 1));
+ }
+ value = b;
break;
case CHAR:
case VARCHAR:
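The BOOLEAN branch above tolerates booleans that were persisted into CHAR(1) columns, as DataNucleus does on Derby. One way to express the accepted encodings as a standalone helper (hypothetical, shown only to document the expected values):

// Sketch only: normalize a CHAR(1) flag to a boolean; "N" and "0" (or blank)
// mean false, anything else ("Y", "1", ...) means true.
static boolean charFlagToBoolean(String flag) {
  return flag != null && !flag.isEmpty()
      && !"N".equalsIgnoreCase(flag) && !"0".equals(flag);
}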
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index cb0b0d1c92..bc332c19f1 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -27,7 +27,7 @@
import org.apache.hive.service.rpc.thrift.TSetClientInfoResp;
import org.apache.hive.service.rpc.thrift.TSetClientInfoReq;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.auth.HiveAuthUtils;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
index c6ac79373f..693203fab3 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
@@ -18,7 +18,7 @@
package org.apache.hive.jdbc;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hive.jdbc.logs.InPlaceUpdateStream;
import org.apache.hive.service.cli.RowSet;
diff --git a/kafka-handler/pom.xml b/kafka-handler/pom.xml
index a66a70af3e..6ad41deb99 100644
--- a/kafka-handler/pom.xml
+++ b/kafka-handler/pom.xml
@@ -115,7 +115,7 @@
org.slf4j
slf4j-api
-      <version>1.7.25</version>
+      <version>1.7.30</version>
test
diff --git a/kudu-handler/pom.xml b/kudu-handler/pom.xml
index 6f02bd0ab8..c4661fac47 100644
--- a/kudu-handler/pom.xml
+++ b/kudu-handler/pom.xml
@@ -35,6 +35,12 @@
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
org.apache.hive
hive-exec
diff --git a/kudu-handler/src/test/results/positive/kudu_complex_queries.q.out b/kudu-handler/src/test/results/positive/kudu_complex_queries.q.out
index 73fc3e514f..44ce63cfc5 100644
--- a/kudu-handler/src/test/results/positive/kudu_complex_queries.q.out
+++ b/kudu-handler/src/test/results/positive/kudu_complex_queries.q.out
@@ -94,7 +94,7 @@ STAGE PLANS:
Statistics: Num rows: 309 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 309 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE
@@ -114,7 +114,7 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: UDFToDouble(_col0) (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: UDFToDouble(_col0) (type: double)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -253,7 +253,7 @@ STAGE PLANS:
Statistics: Num rows: 154 Data size: 15862 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col2 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col2 (type: double)
Statistics: Num rows: 154 Data size: 15862 Basic stats: COMPLETE Column stats: COMPLETE
@@ -274,7 +274,7 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: double)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -298,7 +298,7 @@ STAGE PLANS:
Statistics: Num rows: 121 Data size: 23232 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
- null sort order: a
+ null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 121 Data size: 23232 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
index f2bb83a4b0..040932c870 100644
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
@@ -22,7 +22,7 @@
import java.io.DataOutput;
import java.io.IOException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
diff --git a/llap-common/src/test/org/apache/hadoop/hive/llap/TestRow.java b/llap-common/src/test/org/apache/hadoop/hive/llap/TestRow.java
index 37e934df3a..ed6b005345 100644
--- a/llap-common/src/test/org/apache/hadoop/hive/llap/TestRow.java
+++ b/llap-common/src/test/org/apache/hadoop/hive/llap/TestRow.java
@@ -21,7 +21,7 @@
import java.util.List;
import java.util.Random;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index d1d6acd398..2afb899148 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -21,7 +21,7 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.llap.LlapUtil;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index 33ade55ee1..89bf5713df 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -33,7 +33,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.hive.llap.counters.FragmentCountersMap;
import org.apache.hadoop.hive.llap.counters.WmFragmentCounters;
import org.apache.hadoop.hive.llap.daemon.SchedulerFragmentCompletingListener;
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
index 2ecb7a2830..d11bf1326c 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -67,7 +67,7 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.mutable.MutableInt;
+import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.JvmPauseMonitor;
import org.apache.hadoop.hive.conf.HiveConf;
diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 5421d4d814..e3f5eb9386 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1087,7 +1087,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
`CQ_START` bigint,
`CQ_RUN_AS` string,
`CQ_HIGHEST_WRITE_ID` bigint,
- `CQ_HADOOP_JOB_ID` string
+ `CQ_HADOOP_JOB_ID` string,
+ `CQ_ERROR_MESSAGE` string
)
STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
TBLPROPERTIES (
@@ -1105,7 +1106,8 @@ TBLPROPERTIES (
\"COMPACTION_QUEUE\".\"CQ_START\",
\"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
\"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
- \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\"
+ \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\",
+ \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\"
FROM \"COMPACTION_QUEUE\"
"
);
@@ -1123,7 +1125,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
`CC_END` bigint,
`CC_RUN_AS` string,
`CC_HIGHEST_WRITE_ID` bigint,
- `CC_HADOOP_JOB_ID` string
+ `CC_HADOOP_JOB_ID` string,
+ `CC_ERROR_MESSAGE` string
)
STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
TBLPROPERTIES (
@@ -1142,7 +1145,8 @@ TBLPROPERTIES (
\"COMPLETED_COMPACTIONS\".\"CC_END\",
\"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
\"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\"
+ \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\",
+ \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\"
FROM \"COMPLETED_COMPACTIONS\"
"
);
diff --git a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index 9b498d0825..fa518747de 100644
--- a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -137,6 +137,189 @@ LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_I
"
);
+CREATE EXTERNAL TABLE IF NOT EXISTS `SCHEDULED_QUERIES` (
+ `SCHEDULED_QUERY_ID` bigint,
+ `SCHEDULE_NAME` string,
+ `ENABLED` boolean,
+ `CLUSTER_NAMESPACE` string,
+ `SCHEDULE` string,
+ `USER` string,
+ `QUERY` string,
+ `NEXT_EXECUTION` bigint,
+ CONSTRAINT `SYS_PK_SCHEDULED_QUERIES` PRIMARY KEY (`SCHEDULED_QUERY_ID`) DISABLE
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ \"SCHEDULED_QUERY_ID\",
+ \"SCHEDULE_NAME\",
+ \"ENABLED\",
+ \"CLUSTER_NAMESPACE\",
+ \"SCHEDULE\",
+ \"USER\",
+ \"QUERY\",
+ \"NEXT_EXECUTION\"
+FROM
+ \"SCHEDULED_QUERIES\""
+);
+
+CREATE EXTERNAL TABLE IF NOT EXISTS `SCHEDULED_EXECUTIONS` (
+ `SCHEDULED_EXECUTION_ID` bigint,
+ `SCHEDULED_QUERY_ID` bigint,
+ `EXECUTOR_QUERY_ID` string,
+ `STATE` string,
+ `START_TIME` int,
+ `END_TIME` int,
+ `ERROR_MESSAGE` string,
+ `LAST_UPDATE_TIME` int,
+ CONSTRAINT `SYS_PK_SCHEDULED_EXECUTIONS` PRIMARY KEY (`SCHEDULED_EXECUTION_ID`) DISABLE
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ \"SCHEDULED_EXECUTION_ID\",
+ \"SCHEDULED_QUERY_ID\",
+ \"EXECUTOR_QUERY_ID\",
+ \"STATE\",
+ \"START_TIME\",
+ \"END_TIME\",
+ \"ERROR_MESSAGE\",
+ \"LAST_UPDATE_TIME\"
+FROM
+ \"SCHEDULED_EXECUTIONS\""
+);
+
+CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
+ `CQ_ID` bigint,
+ `CQ_DATABASE` string,
+ `CQ_TABLE` string,
+ `CQ_PARTITION` string,
+ `CQ_STATE` string,
+ `CQ_TYPE` string,
+ `CQ_TBLPROPERTIES` string,
+ `CQ_WORKER_ID` string,
+ `CQ_START` bigint,
+ `CQ_RUN_AS` string,
+ `CQ_HIGHEST_WRITE_ID` bigint,
+ `CQ_HADOOP_JOB_ID` string,
+ `CQ_ERROR_MESSAGE` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ \"COMPACTION_QUEUE\".\"CQ_ID\",
+ \"COMPACTION_QUEUE\".\"CQ_DATABASE\",
+ \"COMPACTION_QUEUE\".\"CQ_TABLE\",
+ \"COMPACTION_QUEUE\".\"CQ_PARTITION\",
+ \"COMPACTION_QUEUE\".\"CQ_STATE\",
+ \"COMPACTION_QUEUE\".\"CQ_TYPE\",
+ \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\",
+ \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\",
+ \"COMPACTION_QUEUE\".\"CQ_START\",
+ \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
+ \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
+ \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\",
+ \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\"
+FROM \"COMPACTION_QUEUE\"
+"
+);
+
+CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
+ `CC_ID` bigint,
+ `CC_DATABASE` string,
+ `CC_TABLE` string,
+ `CC_PARTITION` string,
+ `CC_STATE` string,
+ `CC_TYPE` string,
+ `CC_TBLPROPERTIES` string,
+ `CC_WORKER_ID` string,
+ `CC_START` bigint,
+ `CC_END` bigint,
+ `CC_RUN_AS` string,
+ `CC_HIGHEST_WRITE_ID` bigint,
+ `CC_HADOOP_JOB_ID` string,
+ `CC_ERROR_MESSAGE` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ \"COMPLETED_COMPACTIONS\".\"CC_ID\",
+ \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\",
+ \"COMPLETED_COMPACTIONS\".\"CC_TABLE\",
+ \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\",
+ \"COMPLETED_COMPACTIONS\".\"CC_STATE\",
+ \"COMPLETED_COMPACTIONS\".\"CC_TYPE\",
+ \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\",
+ \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\",
+ \"COMPLETED_COMPACTIONS\".\"CC_START\",
+ \"COMPLETED_COMPACTIONS\".\"CC_END\",
+ \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
+ \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
+ \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\",
+ \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\"
+FROM \"COMPLETED_COMPACTIONS\"
+"
+);
+
+CREATE OR REPLACE VIEW `COMPACTIONS`
+(
+ `C_ID`,
+ `C_CATALOG`,
+ `C_DATABASE`,
+ `C_TABLE`,
+ `C_PARTITION`,
+ `C_TYPE`,
+ `C_STATE`,
+ `C_HOSTNAME`,
+ `C_WORKER_ID`,
+ `C_START`,
+ `C_DURATION`,
+ `C_HADOOP_JOB_ID`,
+ `C_RUN_AS`,
+ `C_HIGHEST_WRITE_ID`
+) AS
+SELECT
+ CC_ID,
+ 'default',
+ CC_DATABASE,
+ CC_TABLE,
+ CC_PARTITION,
+ CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
+ CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' WHEN CC_STATE = 'a' THEN 'attempted' ELSE 'UNKNOWN' END,
+ CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END,
+ CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[1] END,
+ CC_START,
+ CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END,
+ CC_HADOOP_JOB_ID,
+ CC_RUN_AS,
+ CC_HIGHEST_WRITE_ID
+FROM COMPLETED_COMPACTIONS
+UNION ALL
+SELECT
+ CQ_ID,
+ 'default',
+ CQ_DATABASE,
+ CQ_TABLE,
+ CQ_PARTITION,
+ CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
+ CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END,
+ CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END,
+ CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[1] END,
+ CQ_START,
+ cast (null as string),
+ CQ_HADOOP_JOB_ID,
+ CQ_RUN_AS,
+ CQ_HIGHEST_WRITE_ID
+FROM COMPACTION_QUEUE;
+
-- HIVE-22553
CREATE EXTERNAL TABLE IF NOT EXISTS `TXNS` (
`TXN_ID` bigint,
@@ -312,4 +495,80 @@ DROP TABLE IF EXISTS `VERSION`;
CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`,
'Hive release version 4.0.0' AS `VERSION_COMMENT`;
+USE INFORMATION_SCHEMA;
+
+create or replace view SCHEDULED_QUERIES as
+select
+ `SCHEDULED_QUERY_ID` ,
+ `SCHEDULE_NAME` ,
+ `ENABLED`,
+ `CLUSTER_NAMESPACE`,
+ `SCHEDULE`,
+ `USER`,
+ `QUERY`,
+ FROM_UNIXTIME(NEXT_EXECUTION) as NEXT_EXECUTION
+FROM
+ SYS.SCHEDULED_QUERIES
+;
+
+create or replace view SCHEDULED_EXECUTIONS as
+SELECT
+ SCHEDULED_EXECUTION_ID,
+ SCHEDULE_NAME,
+ EXECUTOR_QUERY_ID,
+ `STATE`,
+ FROM_UNIXTIME(START_TIME) as START_TIME,
+ FROM_UNIXTIME(END_TIME) as END_TIME,
+ END_TIME-START_TIME as ELAPSED,
+ ERROR_MESSAGE,
+ FROM_UNIXTIME(LAST_UPDATE_TIME) AS LAST_UPDATE_TIME
+FROM
+ SYS.SCHEDULED_EXECUTIONS SE
+JOIN
+ SYS.SCHEDULED_QUERIES SQ
+WHERE
+ SE.SCHEDULED_QUERY_ID=SQ.SCHEDULED_QUERY_ID;
+
+CREATE OR REPLACE VIEW `COMPACTIONS`
+(
+ `C_ID`,
+ `C_CATALOG`,
+ `C_DATABASE`,
+ `C_TABLE`,
+ `C_PARTITION`,
+ `C_TYPE`,
+ `C_STATE`,
+ `C_HOSTNAME`,
+ `C_WORKER_ID`,
+ `C_START`,
+ `C_DURATION`,
+ `C_HADOOP_JOB_ID`,
+ `C_RUN_AS`,
+ `C_HIGHEST_WRITE_ID`
+) AS
+SELECT DISTINCT
+ C_ID,
+ C_CATALOG,
+ C_DATABASE,
+ C_TABLE,
+ C_PARTITION,
+ C_TYPE,
+ C_STATE,
+ C_HOSTNAME,
+ C_WORKER_ID,
+ C_START,
+ C_DURATION,
+ C_HADOOP_JOB_ID,
+ C_RUN_AS,
+ C_HIGHEST_WRITE_ID
+FROM
+ `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`)
+ JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`)
+ LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`)
+WHERE
+ (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL
+ AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER'
+ OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'))
+ AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer());
+
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
index 6c33f63298..b68511d4fa 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
@@ -30,8 +30,8 @@
import javax.security.auth.login.LoginException;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
index 59bcd5ca34..465d914043 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
@@ -35,11 +35,11 @@
try {
if (envContext != null) {
String addedJars = envContext.getProperties().get("hive.added.jars.path");
- if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+ if (org.apache.commons.lang3.StringUtils.isNotBlank(addedJars)) {
//for thread safe
orgHiveLoader = conf.getClassLoader();
ClassLoader loader = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.addToClassPath(
- orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ","));
+ orgHiveLoader, org.apache.commons.lang3.StringUtils.split(addedJars, ","));
conf.setClassLoader(loader);
}
}
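The commons-lang to commons-lang3 switch here (and throughout the patch) is a drop-in replacement for the methods involved: lang3's StringUtils.isNotBlank and StringUtils.split(String, String) keep the old semantics, including treating the second split argument as a set of separator characters and skipping empty tokens. A quick self-contained check:

import org.apache.commons.lang3.StringUtils;

public class Lang3MigrationCheck {
  public static void main(String[] args) {
    // Blank-only strings are still considered "blank".
    System.out.println(StringUtils.isNotBlank("  "));                    // false
    // Adjacent separators still do not produce empty tokens.
    System.out.println(String.join("|",
        StringUtils.split("a.jar,b.jar,,c.jar", ",")));                  // a.jar|b.jar|c.jar
  }
}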
diff --git a/parser/pom.xml b/parser/pom.xml
new file mode 100644
index 0000000000..05fd78d4a3
--- /dev/null
+++ b/parser/pom.xml
@@ -0,0 +1,107 @@
+
+
+
+ 4.0.0
+
+ org.apache.hive
+ hive
+ 4.0.0-SNAPSHOT
+ ../pom.xml
+
+
+ hive-parser
+ jar
+ Hive Parser
+
+
+ ..
+
+
+
+
+
+
+ org.apache.hive
+ hive-common
+ ${project.version}
+
+
+ org.eclipse.jetty.aggregate
+ jetty-all
+
+
+
+
+
+ org.antlr
+ antlr-runtime
+ ${antlr.version}
+
+
+ org.antlr
+ ST4
+ ${ST4.version}
+
+
+
+
+
+ ${basedir}/src/java
+ ${basedir}/src/test
+
+
+
+ org.antlr
+ antlr3-maven-plugin
+
+
+
+ antlr
+
+
+
+
+ ${basedir}/src/java
+
+ **/HiveLexer.g
+ **/HiveParser.g
+ **/HintParser.g
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-source
+ generate-sources
+
+ add-source
+
+
+
+ ${project.build.directory}/generated-sources/java
+
+
+
+
+
+
+
+
+
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java
new file mode 100644
index 0000000000..ed05673e12
--- /dev/null
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.antlr.runtime.tree.Tree;
+
+public class ASTErrorUtils {
+
+ private static int getLine(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getToken().getLine();
+ }
+
+ return getLine((ASTNode) tree.getChild(0));
+ }
+
+ private static int getCharPositionInLine(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getToken().getCharPositionInLine();
+ }
+
+ return getCharPositionInLine((ASTNode) tree.getChild(0));
+ }
+
+ // Dirty hack as this will throw away spaces and other things - find a better
+ // way!
+ public static String getText(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getText();
+ }
+ return getText((ASTNode) tree.getChild(tree.getChildCount() - 1));
+ }
+
+ public static String getMsg(String mesg, ASTNode tree) {
+ StringBuilder sb = new StringBuilder();
+ renderPosition(sb, tree);
+ sb.append(" ");
+ sb.append(mesg);
+ sb.append(" '");
+ sb.append(getText(tree));
+ sb.append("'");
+ renderOrigin(sb, tree.getOrigin());
+ return sb.toString();
+ }
+
+ static final String LINE_SEP = System.getProperty("line.separator");
+
+ public static void renderOrigin(StringBuilder sb, ASTNodeOrigin origin) {
+ while (origin != null) {
+ sb.append(" in definition of ");
+ sb.append(origin.getObjectType());
+ sb.append(" ");
+ sb.append(origin.getObjectName());
+ sb.append(" [");
+ sb.append(LINE_SEP);
+ sb.append(origin.getObjectDefinition());
+ sb.append(LINE_SEP);
+ sb.append("] used as ");
+ sb.append(origin.getUsageAlias());
+ sb.append(" at ");
+ ASTNode usageNode = origin.getUsageNode();
+ renderPosition(sb, usageNode);
+ origin = usageNode.getOrigin();
+ }
+ }
+
+ private static void renderPosition(StringBuilder sb, ASTNode tree) {
+ sb.append("Line ");
+ sb.append(getLine(tree));
+ sb.append(":");
+ sb.append(getCharPositionInLine(tree));
+ }
+
+ public static String renderPosition(ASTNode n) {
+ StringBuilder sb = new StringBuilder();
+ renderPosition(sb, n);
+ return sb.toString();
+ }
+
+ public static String getMsg(String mesg, Tree tree) {
+ return getMsg(mesg, (ASTNode) tree);
+ }
+
+ public static String getMsg(String mesg, ASTNode tree, String reason) {
+ return getMsg(mesg, tree) + ": " + reason;
+ }
+
+ public static String getMsg(String mesg, Tree tree, String reason) {
+ return getMsg(mesg, (ASTNode) tree, reason);
+ }
+}
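ASTErrorUtils gives the new parser module a home for the position-annotated error rendering that callers in ql rely on. A typical call site would look roughly like the following; the error constant and node variable are illustrative, not taken from this patch:

// Sketch only: surface a "Line x:y ... 'name'" style error for a bad AST node.
static void failOnInvalidTable(ASTNode tableNode) throws SemanticException {
  throw new SemanticException(
      ASTErrorUtils.getMsg(ErrorMsg.INVALID_TABLE.getMsg(), tableNode));
}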
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
diff --git a/pom.xml b/pom.xml
index c5e062b560..2947a2928e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -46,6 +46,8 @@
hplsql
jdbc
metastore
+    <module>parser</module>
+    <module>udf</module>
ql
serde
service-rpc
@@ -143,6 +145,7 @@
1.4
2.6.1
2.6.0
+    <commons-text.version>1.8</commons-text.version>
10.14.1.0
3.1.0
0.1.2
@@ -200,7 +203,7 @@
1.5.6
2.5.0
1.0.1
-    <slf4j.version>1.7.10</slf4j.version>
+    <slf4j.version>1.7.30</slf4j.version>
4.0.4
2.7.0-SNAPSHOT
0.9.1
@@ -373,6 +376,11 @@
junit
${junit.version}
+      <dependency>
+        <groupId>org.apache.commons</groupId>
+        <artifactId>commons-text</artifactId>
+        <version>${commons-text.version}</version>
+      </dependency>
org.apache.logging.log4j
log4j-1.2-api
@@ -1297,6 +1305,13 @@
true
+                <restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
+                  <reason>Do not use commons-lang</reason>
+                  <bannedImports>
+                    <bannedImport>org.apache.commons.lang.**</bannedImport>
+                  </bannedImports>
+                  <includeTestCode>true</includeTestCode>
+                </restrictImports>
diff --git a/ql/pom.xml b/ql/pom.xml
index e5eed46947..3632a5efe4 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -63,6 +63,16 @@
hive-serde
${project.version}
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-parser</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
org.apache.hive
hive-service-rpc
@@ -119,6 +129,11 @@
commons-lang3
${commons-lang3.version}
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-text</artifactId>
+      <version>${commons-text.version}</version>
+    </dependency>
javolution
javolution
@@ -139,16 +154,6 @@
log4j-slf4j-impl
${log4j2.version}
-
- org.antlr
- antlr-runtime
- ${antlr.version}
-
-
- org.antlr
- ST4
- ${ST4.version}
-
org.apache.avro
avro
@@ -851,25 +856,6 @@
${basedir}/src/test
-
- org.antlr
- antlr3-maven-plugin
-
-
-
- antlr
-
-
-
-
- ${basedir}/src/java
-
- **/HiveLexer.g
- **/HiveParser.g
- **/HintParser.g
-
-
-
org.apache.maven.plugins
maven-antrun-plugin
@@ -954,6 +940,7 @@
org.apache.hive:hive-common
+                <include>org.apache.hive:hive-udf</include>
org.apache.hive:hive-exec
org.apache.hive:hive-serde
org.apache.hive:hive-llap-common
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 7a7fce1b0b..a85b94c475 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -923,7 +923,7 @@ public void resetStream() {
* Little abbreviation for StringUtils.
*/
private static boolean strEquals(String str1, String str2) {
- return org.apache.commons.lang.StringUtils.equals(str1, str2);
+ return org.apache.commons.lang3.StringUtils.equals(str1, str2);
}
/**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
index 3dc6bf56f2..13b419e73f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
@@ -28,7 +28,8 @@
import java.util.SortedMap;
import java.util.TreeMap;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -45,6 +46,8 @@
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.common.util.ReflectionUtil;
import org.slf4j.Logger;
@@ -219,4 +222,17 @@ private static String getHS2Host(HiveConf conf) throws SemanticException {
throw new SemanticException("Kill query is only supported in HiveServer2 (not hive cli)");
}
+
+ public static boolean isEncryptionZoneRoot(Path path, Configuration conf) throws IOException {
+ HadoopShims hadoopShims = ShimLoader.getHadoopShims();
+ HadoopShims.HdfsEncryptionShim pathEncryptionShim
+ = hadoopShims.createHdfsEncryptionShim(
+ path.getFileSystem(conf), conf);
+ if (pathEncryptionShim.isPathEncrypted(path)) {
+ if (pathEncryptionShim.getEncryptionZoneForPath(path).getPath().equals(path)) {
+ return true;
+ }
+ }
+ return false;
+ }
}
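
The new DDLUtils.isEncryptionZoneRoot helper resolves the HDFS encryption shim for the path's filesystem and reports whether the path is itself the root of an encryption zone. Below is a minimal caller sketch built on that helper; the wrapper class, method name and error text are illustrative assumptions, not part of the patch (the patch's own caller appears later in CreateTableOperation).

```java
// Hypothetical guard built on the new DDLUtils.isEncryptionZoneRoot helper.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public final class EncryptionZoneGuard {
  private EncryptionZoneGuard() {
  }

  /** Rejects a location that points at the root directory of an HDFS encryption zone. */
  public static void assertNotEncryptionZoneRoot(Path location, Configuration conf) throws HiveException {
    try {
      if (location != null && DDLUtils.isEncryptionZoneRoot(location, conf)) {
        throw new HiveException("Location " + location + " is the root of an encryption zone");
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }
}
```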
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
index 44871b4c5d..748236cc1e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
@@ -22,7 +22,7 @@
import java.net.URISyntaxException;
import java.util.Map;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/DescFunctionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/DescFunctionOperation.java
index 6a94a93ef9..ea3f522e58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/DescFunctionOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/desc/DescFunctionOperation.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.FunctionInfo.FunctionResource;
-import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.commons.lang3.StringUtils.join;
import java.io.DataOutputStream;
import java.io.IOException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
index 4eed5c9861..a2177e0291 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.PreOrderWalker;
import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -91,7 +91,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException {
if (!arguments.isEmpty()) {
// Walk down expression to see which arguments are actually used.
Node expression = (Node) root.getChild(2);
- PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
+ PreOrderWalker walker = new PreOrderWalker(new SemanticDispatcher() {
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
if (nd instanceof ASTNode) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java
index fa20f23815..c040b40d93 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
import org.apache.hadoop.hive.ql.ddl.function.AbstractFunctionAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -50,7 +51,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException {
// In 2 cases out of 3, we could pass the path and type directly to metastore...
if (AnalyzeCommandUtils.isPartitionLevelStats(root)) {
Map<String, String> partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(table, root, conf);
- Partition part = getPartition(table, partSpec, true);
+ Partition part = PartitionUtils.getPartition(db, table, partSpec, true);
desc = new CacheMetadataDesc(table.getDbName(), table.getTableName(), part.getName());
inputs.add(new ReadEntity(part));
} else {
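
Callers that previously used the analyzer's own getPartition now go through the shared PartitionUtils helper (added later in this patch) and pass the Hive metastore handle explicitly. A minimal sketch of the new call pattern follows; the wrapper class is illustrative, and db, table and partSpec are assumed to be available from the analyzer context.

```java
// Sketch of resolving a single partition via the new PartitionUtils helper.
import java.util.Map;

import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class PartitionLookupExample {
  static Partition lookup(Hive db, Table table, Map<String, String> partSpec) throws SemanticException {
    // The final argument requests an exception (INVALID_PARTITION) instead of a null
    // result when the partition does not exist.
    return PartitionUtils.getPartition(db, table, partSpec, true);
  }
}
```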
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
index 9348efc5a1..e470914ed8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
@@ -32,8 +32,8 @@
private static final long serialVersionUID = 1L;
public static final String SCHEMA =
- "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid#" +
- "string:string:string:string:string:string:string:string:string:string:string";
+ "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid,errormessage#" +
+ "string:string:string:string:string:string:string:string:string:string:string:string";
private String resFile;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
index 517d88237c..d45597ba95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
@@ -86,6 +86,8 @@ private void writeHeader(DataOutputStream os) throws IOException {
os.writeBytes("Duration(ms)");
os.write(Utilities.tabCode);
os.writeBytes("HadoopJobId");
+ os.write(Utilities.tabCode);
+ os.writeBytes("Error message");
os.write(Utilities.newLineCode);
}
@@ -115,6 +117,9 @@ private void writeRow(DataOutputStream os, ShowCompactResponseElement e) throws
os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : NO_VAL);
os.write(Utilities.tabCode);
os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : NO_VAL);
+ os.write(Utilities.tabCode);
+ String error = e.getErrorMessage();
+ os.writeBytes(error == null ? NO_VAL : error);
os.write(Utilities.newLineCode);
}
}
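
SHOW COMPACTIONS output gains a trailing "Error message" column, written with the same tab-separated convention as the existing fields. The following standalone sketch mirrors that row layout; it uses a plain '\t' instead of Utilities.tabCode, shows only a subset of the columns, and the NO_VAL placeholder text is an assumption.

```java
// Minimal illustration of the extended SHOW COMPACTIONS row layout, including the
// new trailing error-message column. Placeholder text for missing values is assumed.
final class CompactionRowExample {
  private static final String NO_VAL = " --- ";   // illustrative placeholder

  static String formatRow(long id, String db, String table, String state,
                          String hadoopJobId, String errorMessage) {
    StringBuilder row = new StringBuilder();
    row.append(id).append('\t')
       .append(db).append('\t')
       .append(table).append('\t')
       .append(state).append('\t')
       .append(hadoopJobId == null ? NO_VAL : hadoopJobId).append('\t')
       // New column: emit the placeholder when no error message has been recorded.
       .append(errorMessage == null ? NO_VAL : errorMessage);
    return row.toString();
  }
}
```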
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
index 1adcef655f..81800fe000 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
@@ -112,7 +113,7 @@ protected void addInputsOutputsAlterTable(TableName tableName, Map columns, List
- Map<ASTNode, ExprNodeDesc> genExprs = TypeCheckProcFactory.genExprNode(checkExprAST, typeCheckCtx);
+ Map<ASTNode, ExprNodeDesc> genExprs = ExprNodeTypeCheck.genExprNode(checkExprAST, typeCheckCtx);
ExprNodeDesc checkExpr = genExprs.get(checkExprAST);
if (checkExpr == null) {
throw new SemanticException(
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/CreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/CreateTableOperation.java
index cf4bc81ac8..da461bcbcd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/CreateTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/CreateTableOperation.java
@@ -59,6 +59,14 @@ public int execute() throws HiveException {
LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation());
boolean replDataLocationChanged = false;
+ try {
+ if (tbl.getSd().getLocation() != null
+ && DDLUtils.isEncryptionZoneRoot(new Path(tbl.getSd().getLocation()), context.getConf())) {
+ throw new HiveException("Table Location cannot be set to encryption zone root dir");
+ }
+ } catch (IOException e) {
+ throw new HiveException(e);
+ }
if (desc.getReplicationSpec().isInReplicationScope()) {
// If in replication scope, we should check if the object we're looking at exists, and if so,
// trigger replace-mode semantics.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/ShowCreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/ShowCreateTableOperation.java
index affed03fbb..e07559f692 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/ShowCreateTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/create/show/ShowCreateTableOperation.java
@@ -36,7 +36,7 @@
import java.util.Set;
import java.util.SortedMap;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.TableType;
@@ -51,6 +51,12 @@
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.util.DirectionUtils;
import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
import org.apache.hive.common.util.HiveStringUtils;
import org.stringtemplate.v4.ST;
@@ -142,16 +148,64 @@ private String getExternal(Table table) {
private String getColumns(Table table) {
List<String> columnDescs = new ArrayList<>();
- for (FieldSchema col : table.getCols()) {
- String columnDesc = " `" + col.getName() + "` " + col.getType();
- if (col.getComment() != null) {
- columnDesc += " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'";
+ for (FieldSchema column : table.getCols()) {
+ String columnType = formatType(TypeInfoUtils.getTypeInfoFromTypeString(column.getType()));
+ String columnDesc = " `" + column.getName() + "` " + columnType;
+ if (column.getComment() != null) {
+ columnDesc += " COMMENT '" + HiveStringUtils.escapeHiveCommand(column.getComment()) + "'";
}
columnDescs.add(columnDesc);
}
return StringUtils.join(columnDescs, ", \n");
}
+ /** Struct field names are identifiers, so they need to be wrapped in backticks (``). */
+ private String formatType(TypeInfo typeInfo) {
+ switch (typeInfo.getCategory()) {
+ case PRIMITIVE:
+ return typeInfo.getTypeName();
+ case STRUCT:
+ StringBuilder structFormattedType = new StringBuilder();
+
+ StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
+ for (int i = 0; i < structTypeInfo.getAllStructFieldNames().size(); i++) {
+ if (structFormattedType.length() != 0) {
+ structFormattedType.append(", ");
+ }
+
+ String structElementName = structTypeInfo.getAllStructFieldNames().get(i);
+ String structElementType = formatType(structTypeInfo.getAllStructFieldTypeInfos().get(i));
+
+ structFormattedType.append("`" + structElementName + "`:" + structElementType);
+ }
+ return "struct<" + structFormattedType.toString() + ">";
+ case LIST:
+ ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
+ String elementType = formatType(listTypeInfo.getListElementTypeInfo());
+ return "array<" + elementType + ">";
+ case MAP:
+ MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
+ String keyTypeInfo = mapTypeInfo.getMapKeyTypeInfo().getTypeName();
+ String valueTypeInfo = formatType(mapTypeInfo.getMapValueTypeInfo());
+ return "map<" + keyTypeInfo + "," + valueTypeInfo + ">";
+ case UNION:
+ StringBuilder unionFormattedType = new StringBuilder();
+
+ UnionTypeInfo unionTypeInfo = (UnionTypeInfo)typeInfo;
+ for (TypeInfo unionElementTypeInfo : unionTypeInfo.getAllUnionObjectTypeInfos()) {
+ if (unionFormattedType.length() != 0) {
+ unionFormattedType.append(", ");
+ }
+
+ String unionElementType = formatType(unionElementTypeInfo);
+ unionFormattedType.append(unionElementType);
+ }
+ return "uniontype<" + unionFormattedType.toString() + ">";
+ default:
+ throw new RuntimeException("Unknown type: " + typeInfo.getCategory());
+ }
+ }
+
private String getComment(Table table) {
String comment = table.getProperty("comment");
return (comment != null) ? "COMMENT '" + HiveStringUtils.escapeHiveCommand(comment) + "'" : "";
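
The new formatType walk re-renders a column's TypeInfo so that struct field names come out backtick-quoted, while primitive, array, map and uniontype renderings stay the same otherwise. Below is a trimmed, self-contained sketch of the same idea covering only the primitive, struct and list cases; the class and method names are illustrative, not the patch's own.

```java
// Illustrative re-implementation of the backtick-quoting type renderer used by
// SHOW CREATE TABLE; only the primitive, struct and list cases are sketched.
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

final class ColumnTypeRenderer {
  static String render(TypeInfo typeInfo) {
    switch (typeInfo.getCategory()) {
      case PRIMITIVE:
        return typeInfo.getTypeName();
      case STRUCT:
        StructTypeInfo struct = (StructTypeInfo) typeInfo;
        StringBuilder fields = new StringBuilder();
        for (int i = 0; i < struct.getAllStructFieldNames().size(); i++) {
          if (fields.length() > 0) {
            fields.append(", ");
          }
          // Field names are identifiers, so they are wrapped in backticks.
          fields.append('`').append(struct.getAllStructFieldNames().get(i)).append("`:")
                .append(render(struct.getAllStructFieldTypeInfos().get(i)));
        }
        return "struct<" + fields + ">";
      case LIST:
        return "array<" + render(((ListTypeInfo) typeInfo).getListElementTypeInfo()) + ">";
      default:
        // Map and union are handled analogously in the actual patch.
        return typeInfo.getTypeName();
    }
  }

  public static void main(String[] args) {
    TypeInfo t = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:int,b:array<string>>");
    // Prints: struct<`a`:int, `b`:array<string>>
    System.out.println(render(t));
  }
}
```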
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
index 5178fb5fb5..7e467dd9f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
@@ -26,7 +26,7 @@
import java.util.Map;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.ValidTxnList;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
index 72db45755a..ff6b08b5d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
@@ -21,7 +21,7 @@
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.lang.BooleanUtils;
+import org.apache.commons.lang3.BooleanUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java
new file mode 100644
index 0000000000..f7de0c605b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Utilities for partition related DDL operations.
+ */
+public final class PartitionUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(PartitionUtils.class);
+
+ private PartitionUtils() {
+ throw new UnsupportedOperationException("PartitionUtils should not be instantiated");
+ }
+
+ /**
+ * Certain partition values are used by Hive, e.g. the default partition in dynamic partitioning and the
+ * intermediate partition values used in the archiving process. Naturally, prohibit the user from creating partitions
+ * with these reserved values. The check done by this function is more restrictive than the actual limitation, but it's
+ * simpler. Should be okay since the reserved names are fairly long and uncommon.
+ */
+ public static void validatePartitions(HiveConf conf, Map<String, String> partitionSpec) throws SemanticException {
+ Set<String> reservedPartitionValues = new HashSet<>();
+ // Partition can't have this name
+ reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME));
+ reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME));
+ // Partition value can't end in this suffix
+ reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL));
+ reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED));
+ reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED));
+
+ for (Entry<String, String> e : partitionSpec.entrySet()) {
+ for (String s : reservedPartitionValues) {
+ String value = e.getValue();
+ if (value != null && value.contains(s)) {
+ throw new SemanticException(ErrorMsg.RESERVED_PART_VAL.getMsg(
+ "(User value: " + e.getValue() + " Reserved substring: " + s + ")"));
+ }
+ }
+ }
+ }
+
+ public static ExprNodeGenericFuncDesc makeBinaryPredicate(String fn, ExprNodeDesc left, ExprNodeDesc right)
+ throws SemanticException {
+ return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+ FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(left, right));
+ }
+
+ public static ExprNodeGenericFuncDesc makeUnaryPredicate(String fn, ExprNodeDesc arg) throws SemanticException {
+ return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+ FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(arg));
+ }
+
+ public static Partition getPartition(Hive db, Table table, Map<String, String> partitionSpec, boolean throwException)
+ throws SemanticException {
+ Partition partition;
+ try {
+ partition = db.getPartition(table, partitionSpec, false);
+ } catch (Exception e) {
+ throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec), e);
+ }
+ if (partition == null && throwException) {
+ throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec));
+ }
+ return partition;
+ }
+
+ public static List<Partition> getPartitions(Hive db, Table table, Map<String, String> partitionSpec,
+ boolean throwException) throws SemanticException {
+ List<Partition> partitions;
+ try {
+ partitions = partitionSpec == null ? db.getPartitions(table) : db.getPartitions(table, partitionSpec);
+ } catch (Exception e) {
+ throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec), e);
+ }
+ if (partitions.isEmpty() && throwException) {
+ throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec));
+ }
+ return partitions;
+ }
+
+ private static String toMessage(ErrorMsg message, Object detail) {
+ return detail == null ? message.getMsg() : message.getMsg(detail.toString());
+ }
+
+ /**
+ * Add the table partitions to be modified in the output, so that it is available for the pre-execution hook.
+ */
+ public static void addTablePartsOutputs(Hive db, Set<WriteEntity> outputs, Table table,
+ List