diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index e138800..d83453e 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1945,7 +1945,14 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
         "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
 
-    HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, ""),
+    HIVE_PARTITION_CHECK_COLUMN_TYPE("hive.partition.check.column.type", true,
+        "Whether to check, convert, and normalize partition values to conform to their column types"),
+
+    HIVE_PARTITION_CHECK_OLD_COLUMN_TYPE_IN_RENAME("hive.partition.check.old.column.type.in.rename", false,
+        "Whether to check the old partition column values in a partition rename. It should be set to false when "
+        + "the partition rename is used to convert and normalize existing non-conforming partition "
+        + "column values."),
 
     HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null,
         "For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" +
         "using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 19234b5..f190af0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
-import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -63,6 +62,7 @@
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
@@ -973,17 +973,6 @@ public void setUpdateColumnAccessInfo(ColumnAccessInfo updateColumnAccessInfo) {
     this.updateColumnAccessInfo = updateColumnAccessInfo;
   }
 
-  protected LinkedHashMap<String, String> extractPartitionSpecs(Tree partspec)
-      throws SemanticException {
-    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
-    for (int i = 0; i < partspec.getChildCount(); ++i) {
-      CommonTree partspec_val = (CommonTree) partspec.getChild(i);
-      String val = stripQuotes(partspec_val.getChild(1).getText());
-      partSpec.put(partspec_val.getChild(0).getText().toLowerCase(), val);
-    }
-    return partSpec;
-  }
-
   /**
    * Checks if given specification is proper specification for prefix of
    * partition cols, for table partitioned by ds, hr, min valid ones are
@@ -1240,8 +1229,12 @@ private static boolean getPartExprNodeDesc(ASTNode astNode,
   public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
       ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException {
     tbl.validatePartColumnNames(partSpec, shouldBeFull);
+    validatePartColumnType(tbl, partSpec, astNode, conf);
+  }
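+
+  /**
+   * Validates each constant in partSpec against the type of its partition column:
+   * when a literal's type differs from the column type, the value is converted and
+   * normalized, and the partSpec entry is updated in place
+   * (controlled by hive.partition.check.column.type).
+   */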
-    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) {
+  public static void validatePartColumnType(Table tbl, Map<String, String> partSpec,
+      ASTNode astNode, HiveConf conf) throws SemanticException {
+    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_PARTITION_CHECK_COLUMN_TYPE)) {
       return;
     }
 
@@ -1266,29 +1259,38 @@ public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
       astKeyName = stripIdentifierQuotes(astKeyName);
     }
     String colType = partCols.get(astKeyName);
-    ObjectInspector inputOI = astExprNodePair.getValue().getWritableObjectInspector();
+    ObjectInspector inputOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo
+        (astExprNodePair.getValue().getTypeInfo());
 
     TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(colType);
     ObjectInspector outputOI =
-        TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
-    Object value = null;
-    String colSpec = partSpec.get(astKeyName);
-    try {
-      value =
-          ExprNodeEvaluatorFactory.get(astExprNodePair.getValue()).
-          evaluate(colSpec);
-    } catch (HiveException e) {
-      throw new SemanticException(e);
-    }
-    Object convertedValue =
-        ObjectInspectorConverters.getConverter(inputOI, outputOI).convert(value);
-    if (convertedValue == null) {
-      throw new SemanticException(ErrorMsg.PARTITION_SPEC_TYPE_MISMATCH, astKeyName,
-          inputOI.getTypeName(), outputOI.getTypeName());
+        TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
+    // Since partVal is a constant, it is safe to cast ExprNodeDesc to ExprNodeConstantDesc.
+    // Its value should be in normalized format (e.g. no leading zeros in an integer, a date
+    // in the format YYYY-MM-DD, etc.)
+    Object value = ((ExprNodeConstantDesc) astExprNodePair.getValue()).getValue();
+    Object convertedValue = value;
+    if (!inputOI.getTypeName().equals(outputOI.getTypeName())) {
+      convertedValue = ObjectInspectorConverters.getConverter(inputOI, outputOI).convert(value);
+      if (convertedValue == null) {
+        throw new SemanticException(ErrorMsg.PARTITION_SPEC_TYPE_MISMATCH, astKeyName,
+            inputOI.getTypeName(), outputOI.getTypeName());
+      }
+
+      if (!convertedValue.toString().equals(value.toString())) {
+        // the value might have been changed by normalization during the conversion
+        STATIC_LOG.warn("Partition " + astKeyName + " expects type " + outputOI.getTypeName()
+            + " but the input value is of type " + inputOI.getTypeName() + ". Converting "
+            + value.toString() + " to " + convertedValue.toString());
+      }
     }
 
-    normalizeColSpec(partSpec, astKeyName, colType, colSpec, convertedValue);
+    if (!convertedValue.toString().equals(partSpec.get(astKeyName))) {
+      STATIC_LOG.warn("Partition Spec " + astKeyName + "=" + partSpec.get(astKeyName)
+          + " has been changed to " + astKeyName + "=" + convertedValue.toString());
+    }
+    partSpec.put(astKeyName, convertedValue.toString());
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index e8066be..a5f0a7f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -101,25 +101,13 @@ private Table getTable(ASTNode tree) throws SemanticException {
     return getTable(names[0], names[1], true);
   }
 
-  private Map<String, String> getPartKeyValuePairsFromAST(ASTNode tree) {
+  private Map<String, String> getPartKeyValuePairsFromAST(Table tbl, ASTNode tree,
+      HiveConf hiveConf) throws SemanticException {
     ASTNode child = ((ASTNode) tree.getChild(0).getChild(1));
     Map<String, String> partSpec = new HashMap<String, String>();
-    if (null == child) {
-      // case of analyze table T compute statistics for columns;
-      return partSpec;
-    }
-    String partKey;
-    String partValue;
-    for (int i = 0; i < child.getChildCount(); i++) {
-      partKey = new String(getUnescapedName((ASTNode) child.getChild(i).getChild(0))).toLowerCase();
-      if (child.getChild(i).getChildCount() > 1) {
-        partValue = new String(getUnescapedName((ASTNode) child.getChild(i).getChild(1)));
-        partValue = partValue.replaceAll("'", "");
-      } else {
-        partValue = null;
-      }
-      partSpec.put(partKey, partValue);
-    }
+    if (child != null) {
+      partSpec = DDLSemanticAnalyzer.getValidatedPartSpec(tbl, child, hiveConf, false);
+    } // otherwise, it is the case of analyze table T compute statistics for columns;
     return partSpec;
   }
 
@@ -426,7 +414,7 @@ public void analyze(ASTNode ast, Context origCtx) throws SemanticException {
 
     if (isPartitionStats) {
       isTableLevel = false;
-      partSpec = getPartKeyValuePairsFromAST(ast);
+      partSpec = getPartKeyValuePairsFromAST(tbl, ast, conf);
       handlePartialPartitionSpec(partSpec);
     } else {
       isTableLevel = true;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 8302067..5b5d40c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -245,7 +245,20 @@ public void analyzeInternal(ASTNode input) throws SemanticException {
       ast = (ASTNode) input.getChild(1);
       String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0));
       String tableName = getDotName(qualified);
-      HashMap<String, String> partSpec = DDLSemanticAnalyzer.getPartSpec((ASTNode) input.getChild(2));
+      HashMap<String, String> partSpec = null;
+      ASTNode partSpecNode = (ASTNode) input.getChild(2);
+      if (partSpecNode != null) {
+        // We can use alter table partition rename to convert/normalize legacy partition
+        // column values. If so, we should not enable validation of the old partition spec
+        // passed in this command.
+        if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART && !HiveConf
+            .getBoolVar(conf, HiveConf.ConfVars.HIVE_PARTITION_CHECK_OLD_COLUMN_TYPE_IN_RENAME)) {
+          partSpec = getPartSpec(partSpecNode);
+        } else {
+          partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false);
+        }
+      }
+
       if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) {
         analyzeAlterTableRename(qualified, ast, false);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) {
@@ -667,7 +680,7 @@ private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws Se
     Table sourceTable = getTable(getUnescapedName((ASTNode) ast.getChild(1)));
 
     // Get the partition specs
-    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(0));
+    Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode) ast.getChild(0), conf, true);
     validatePartitionValues(partSpecs);
     boolean sameColumns = MetaStoreUtils.compareFieldColumns(
       destTable.getAllCols(), sourceTable.getAllCols());
@@ -866,9 +879,11 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
       }
     } else {
       if (isFullSpec(table, partSpec)) {
+        validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, true);
         Partition partition = getPartition(table, partSpec, true);
         outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
       } else {
+        validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, false);
         for (Partition partition : getPartitions(table, partSpec, false)) {
           outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
         }
@@ -1153,7 +1168,7 @@ private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException {
     HashMap<String, String> partSpec = null;
     Tree part = ast.getChild(2);
     if (part != null) {
-      partSpec = extractPartitionSpecs(part);
+      partSpec = getValidatedPartSpec(getTable(qualified), (ASTNode) part, conf, false);
     }
     List<Task<?>> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec);
     rootTasks.addAll(indexBuilder);
@@ -1884,18 +1899,6 @@ static public String getColPath(
     // or DESCRIBE table partition
     // check whether it is DESCRIBE table partition
     if (ast.getChildCount() == 2) {
-      ASTNode partNode = (ASTNode) ast.getChild(1);
-      HashMap<String, String> partSpec = null;
-      try {
-        partSpec = getPartSpec(partNode);
-      } catch (SemanticException e) {
-        // get exception in resolving partition
-        // it could be DESCRIBE table key
-        // return null
-        // continue processing for DESCRIBE table key
-        return null;
-      }
-
       Table tab = null;
       try {
         tab = db.getTable(tableName);
@@ -1907,6 +1910,18 @@ static public String getColPath(
         throw new SemanticException(e.getMessage(), e);
       }
 
+      ASTNode partNode = (ASTNode) ast.getChild(1);
+      HashMap<String, String> partSpec = null;
+      try {
+        partSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false);
+      } catch (SemanticException e) {
+        // we got an exception resolving the partition: this could be a
+        // DESCRIBE table key query, so return null and continue
+        // processing as DESCRIBE table key
+        return null;
+      }
+
       if (partSpec != null) {
         Partition part = null;
         try {
@@ -2073,10 +2088,19 @@ private void analyzeDescDatabase(ASTNode ast) throws SemanticException {
     return partSpec;
   }
 
+  public static HashMap<String, String> getValidatedPartSpec(Table table, ASTNode astNode,
+      HiveConf conf, boolean shouldBeFull) throws SemanticException {
+    HashMap<String, String> partSpec = getPartSpec(astNode);
+    if (partSpec != null && !partSpec.isEmpty()) {
+      validatePartSpec(table, partSpec, astNode, conf, shouldBeFull);
+    }
+    return partSpec;
+  }
+
   private void analyzeShowPartitions(ASTNode ast) throws SemanticException {
     ShowPartitionsDesc showPartsDesc;
     String tableName = getUnescapedName((ASTNode) ast.getChild(0));
-    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast);
     // We only can have a single partition spec
     assert (partSpecs.size() <= 1);
     Map<String, String> partSpec = null;
@@ -2190,7 +2214,7 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException {
       if (child.getToken().getType() == HiveParser.Identifier) {
         dbName = unescapeIdentifier(child.getText());
       } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) {
-        partSpec = getPartSpec(child);
+        partSpec = getValidatedPartSpec(getTable(tableNames), child, conf, false);
       } else {
         throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
       }
@@ -2291,8 +2315,8 @@ private void analyzeShowLocks(ASTNode ast) throws SemanticException {
           QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0));
       // get partition metadata if partition specified
       if (tableTypeExpr.getChildCount() == 2) {
-        ASTNode partspec = (ASTNode) tableTypeExpr.getChild(1);
-        partSpec = getPartSpec(partspec);
+        ASTNode partSpecNode = (ASTNode) tableTypeExpr.getChild(1);
+        partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false);
       }
     } else if (child.getType() == HiveParser.KW_EXTENDED) {
       isExtended = true;
@@ -2368,7 +2392,7 @@ private void analyzeLockTable(ASTNode ast) throws SemanticException {
     String tableName = getUnescapedName((ASTNode) ast.getChild(0)).toLowerCase();
     String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase());
 
-    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast);
 
     // We only can have a single partition spec
     assert (partSpecs.size() <= 1);
@@ -2421,7 +2445,7 @@ private void analyzeShowTxns(ASTNode ast) throws SemanticException {
   private void analyzeUnlockTable(ASTNode ast) throws SemanticException {
     String tableName = getUnescapedName((ASTNode) ast.getChild(0));
 
-    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast);
 
     // We only can have a single partition spec
     assert (partSpecs.size() <= 1);
@@ -2563,12 +2587,13 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
 
   private void analyzeAlterTableRenamePart(ASTNode ast, String tblName,
       HashMap<String, String> oldPartSpec) throws SemanticException {
-    Map<String, String> newPartSpec = extractPartitionSpecs(ast.getChild(0));
+    Table tab = getTable(tblName, true);
+    validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION);
+    Map<String, String> newPartSpec =
+        getValidatedPartSpec(tab, (ASTNode) ast.getChild(0), conf, false);
     if (newPartSpec == null) {
       throw new SemanticException("RENAME PARTITION Missing Destination" + ast);
     }
-    Table tab = getTable(tblName, true);
-    validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION);
     ReadEntity re = new ReadEntity(tab);
     re.noLockNeeded();
     inputs.add(re);
@@ -2734,9 +2759,8 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole
           addPartitionDesc.addPartition(currentPart, currentLocation);
           currentLocation = null;
         }
-        currentPart = getPartSpec(child);
+        currentPart = getValidatedPartSpec(tab, child, conf, true);
         validatePartitionValues(currentPart); // validate reserved values
-        validatePartSpec(tab, currentPart, child, conf, true);
         break;
       case HiveParser.TOK_PARTITIONLOCATION:
         // if location specified, set in partition
@@ -2801,21 +2825,6 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole
     }
   }
 
-  private Partition getPartitionForOutput(Table tab, Map<String, String> currentPart)
-      throws SemanticException {
-    validatePartitionValues(currentPart);
-    try {
-      Partition partition = db.getPartition(tab, currentPart, false);
-      if (partition != null) {
-        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.INSERT));
-      }
-      return partition;
-    } catch (HiveException e) {
-      LOG.warn("wrong partition spec " + currentPart);
-    }
-    return null;
-  }
-
   /**
    * Rewrite the metadata for one or more partitions in a table. Useful when
   * an external process modifies files on HDFS and you want the pre/post
   * hooks to be fired for the specified partition.
   */
@@ -2834,7 +2843,7 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast)
     inputs.add(new ReadEntity(tab));
 
     // partition name to value
-    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast);
 
     if (partSpecs.size() == 0) {
       AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
@@ -2862,10 +2871,10 @@ private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolea
       throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg());
     }
 
+    Table tab = getTable(qualified);
     // partition name to value
-    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast);
 
-    Table tab = getTable(qualified);
     addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
     validateAlterTableType(tab, AlterTableTypes.ARCHIVE);
     inputs.add(new ReadEntity(tab));
@@ -2912,7 +2921,7 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException {
         tableName = getUnescapedName((ASTNode) ast.getChild(1));
       }
     }
-    List<Map<String, String>> specs = getPartitionSpecs(ast);
+    List<Map<String, String>> specs = getPartitionSpecs(getTable(tableName), ast);
 
     MsckDesc checkDesc = new MsckDesc(tableName, specs, ctx.getResFile(), repair);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
@@ -2927,16 +2936,17 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException {
    * @return A list of partition name to value mappings.
    * @throws SemanticException
    */
-  private List<Map<String, String>> getPartitionSpecs(CommonTree ast)
+  private List<Map<String, String>> getPartitionSpecs(Table tbl, CommonTree ast)
       throws SemanticException {
     List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
     int childIndex = 0;
     // get partition metadata if partition specified
     for (childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
-      Tree partspec = ast.getChild(childIndex);
+      ASTNode partSpecNode = (ASTNode) ast.getChild(childIndex);
       // sanity check
-      if (partspec.getType() == HiveParser.TOK_PARTSPEC) {
-        partSpecs.add(getPartSpec((ASTNode) partspec));
+      if (partSpecNode.getType() == HiveParser.TOK_PARTSPEC) {
+        Map<String, String> partSpec = getValidatedPartSpec(tbl, partSpecNode, conf, false);
+        partSpecs.add(partSpec);
       }
     }
     return partSpecs;
@@ -2968,9 +2978,12 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException {
     for (int i = 0; i < partSpecTree.getChildCount(); ++i) {
       CommonTree partSpecSingleKey = (CommonTree) partSpecTree.getChild(i);
       assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL);
-      String key = partSpecSingleKey.getChild(0).getText().toLowerCase();
+      String key = stripIdentifierQuotes(partSpecSingleKey.getChild(0).getText()).toLowerCase();
       String operator = partSpecSingleKey.getChild(1).getText();
-      String val = stripQuotes(partSpecSingleKey.getChild(2).getText());
+      ASTNode partValNode = (ASTNode) partSpecSingleKey.getChild(2);
+      TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null);
+      ExprNodeConstantDesc valExpr = (ExprNodeConstantDesc) TypeCheckProcFactory
+          .genExprNode(partValNode, typeCheckCtx).get(partValNode);
 
       String type = colTypes.get(key);
       if (type == null) {
@@ -2978,12 +2991,16 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException {
       }
       // Create the corresponding hive expression to filter on partition columns.
       PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
-      Converter converter = ObjectInspectorConverters.getConverter(
-        TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo),
-        TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti));
+      Object val = valExpr.getValue();
+      if (!valExpr.getTypeString().equals(type)) {
+        Converter converter = ObjectInspectorConverters.getConverter(
+            TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(valExpr.getTypeInfo()),
+            TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti));
+        val = converter.convert(valExpr.getValue());
+      }
       ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
-      ExprNodeGenericFuncDesc op = makeBinaryPredicate(
-        operator, column, new ExprNodeConstantDesc(pti, converter.convert(val)));
+      ExprNodeGenericFuncDesc op = makeBinaryPredicate(operator, column,
+          new ExprNodeConstantDesc(pti, val));
       // If it's multi-expr filter (e.g. a='5', b='2012-01-02'), AND with previous exprs.
       expr = (expr == null) ? op : makeBinaryPredicate("and", expr, op);
       names.add(key);
diff --git a/ql/src/test/queries/clientpositive/partition_coltype_literals.q b/ql/src/test/queries/clientpositive/partition_coltype_literals.q
new file mode 100644
index 0000000..05efabd
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/partition_coltype_literals.q
@@ -0,0 +1,81 @@
+set hive.partition.check.old.column.type.in.rename=true;
+
+drop table if exists partcoltypenum;
+create table partcoltypenum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint);
+
+-- add partition
+alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L);
+
+-- describe partition
+describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000');
+
+-- change partition file format
+alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile;
+describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L);
+
+-- change partition clusterby, sortby and bucket
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets;
+describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L);
+
+-- rename partition
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) rename to partition (tint=110Y, sint=22000S, bint=330000000000L);
+describe formatted partcoltypenum partition (tint=110Y, sint=22000, bint='330000000000');
+
+-- insert partition
+insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10;
+insert into partcoltypenum partition (tint=110, sint=22000, bint=330000000000) select key, value from src limit 20;
+
+-- select partition
+select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L;
+select count(1) from partcoltypenum where tint=110Y and sint=22000 and bint='330000000000';
+
+-- analyze partition statistics and column statistics
+analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics;
+describe extended partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L);
+
+analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics for columns;
+describe formatted partcoltypenum.key partition (tint=110Y, sint=22000S, bint=330000000000L);
+describe formatted partcoltypenum.value partition (tint=110Y, sint=22000S, bint=330000000000L);
+
+-- change table column type for partition
+alter table partcoltypenum change key key decimal(10,0);
+alter table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) change key key decimal(10,0);
+describe formatted partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L);
+
+-- change partition column type
+alter table partcoltypenum partition column (tint decimal(3,0));
+describe formatted partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L);
+
+-- show partition
+show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L);
+
+-- drop partition
+alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L);
+show partitions partcoltypenum;
+
+-- change partition file location
+insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10;
+describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L);
+alter table partcoltypenum partition(tint=100BD, sint=20000S, bint=300000000000L) set location "file:/test/test/tint=1/sint=2/bint=3";
+describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L);
+
+drop table partcoltypenum;
+
+-- some tests for configuration properties:
+-- hive.partition.check.column.type=false: do not enforce partition value type checking, conversion, or normalization
+-- hive.partition.check.old.column.type.in.rename=false: skip checking the partition values in the old partSpec, so rename can be used to correct legacy data
+drop table if exists partcoltypeothers;
+create table partcoltypeothers (key int, value string) partitioned by (decpart decimal(6,2), datepart date);
+
+set hive.partition.check.column.type=false;
+insert into partcoltypeothers partition (decpart = 1000.01BD, datepart = date '2015-4-13') select key, value from src limit 10;
+show partitions partcoltypeothers;
+
+set hive.partition.check.old.column.type.in.rename=false;
+set hive.partition.check.column.type=true;
+alter table partcoltypeothers partition(decpart = '1000.01BD', datepart = date '2015-4-13') rename to partition (decpart = 1000.01BD, datepart = date '2015-4-13');
+show partitions partcoltypeothers;
+
+drop table partcoltypeothers;
+
+
diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
new file mode 100644
index 0000000..216f887
--- /dev/null
+++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -0,0 +1,653 @@
+PREHOOK: query: drop table if exists partcoltypenum
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists partcoltypenum
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcoltypenum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcoltypenum
+POSTHOOK: query: create table partcoltypenum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcoltypenum
+PREHOOK: query: -- add partition
+alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@partcoltypenum
+POSTHOOK: query: -- add partition
+alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+PREHOOK: query: -- describe partition
+describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: -- describe partition
+describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [100, 20000, 300000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- change partition file format
+alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile
+PREHOOK: type: ALTERPARTITION_FILEFORMAT
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: query: -- change partition file format
+alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile
+POSTHOOK: type: ALTERPARTITION_FILEFORMAT
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+PREHOOK: query: describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [100, 20000, 300000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- change partition clusterby, sortby and bucket
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: query: -- change partition clusterby, sortby and bucket
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+PREHOOK: query: describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=100Y, sint=20000S, bint=300000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [100, 20000, 300000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:0)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- rename partition
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) rename to partition (tint=110Y, sint=22000S, bint=330000000000L)
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: query: -- rename partition
+alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) rename to partition (tint=110Y, sint=22000S, bint=330000000000L)
+POSTHOOK: type: ALTERTABLE_RENAMEPART
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: query: describe formatted partcoltypenum partition (tint=110Y, sint=22000, bint='330000000000')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=110Y, sint=22000, bint='330000000000')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [110, 22000, 330000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:0)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- insert partition
+insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: query: -- insert partition
+insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into partcoltypenum partition (tint=110, sint=22000, bint=330000000000) select key, value from src limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: query: insert into partcoltypenum partition (tint=110, sint=22000, bint=330000000000) select key, value from src limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- select partition
+select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+POSTHOOK: query: -- select partition
+select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+30
+PREHOOK: query: select count(1) from partcoltypenum where tint=110Y and sint=22000 and bint='330000000000'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from partcoltypenum where tint=110Y and sint=22000 and bint='330000000000'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+30
+PREHOOK: query: -- analyze partition statistics and column statistics
+analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: Output: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: query: -- analyze partition statistics and column statistics
+analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: Output: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: query: describe extended partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe extended partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+key int
+value string
+tint tinyint
+sint smallint
+bint bigint
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+#### A masked pattern was here ####
+PREHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcoltypenum.key partition (tint=110Y, sint=22000S, bint=330000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum.key partition (tint=110Y, sint=22000S, bint=330000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key int 27 484 0 18 from deserializer
+PREHOOK: query: describe formatted partcoltypenum.value partition (tint=110Y, sint=22000S, bint=330000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum.value partition (tint=110Y, sint=22000S, bint=330000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+value string 0 18 6.766666666666667 7 from deserializer
+PREHOOK: query: -- change table column type for partition
+alter table partcoltypenum change key key decimal(10,0)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum
+POSTHOOK: query: -- change table column type for partition
+alter table partcoltypenum change key key decimal(10,0)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum
+PREHOOK: query: alter table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) change key key decimal(10,0)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: query: alter table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) change key key decimal(10,0)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: query: describe formatted partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key decimal(10,0)
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint tinyint
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [110, 22000, 330000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+#### A masked pattern was here ####
+ numFiles 2
+ numRows 30
+ rawDataSize 316
+ totalSize 346
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- change partition column type
+alter table partcoltypenum partition column (tint decimal(3,0))
+PREHOOK: type: ALTERTABLE_PARTCOLTYPE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: -- change partition column type
+alter table partcoltypenum partition column (tint decimal(3,0))
+POSTHOOK: type: ALTERTABLE_PARTCOLTYPE
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum
+PREHOOK: query: describe formatted partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key decimal(10,0)
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint decimal(3,0)
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [110, 22000, 330000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+#### A masked pattern was here ####
+ numFiles 2
+ numRows 30
+ rawDataSize 316
+ totalSize 346
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- show partition
+show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L)
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: -- show partition
+show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L)
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@partcoltypenum
+tint=110/sint=22000/bint=330000000000
+PREHOOK: query: -- drop partition
+alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: query: -- drop partition
+alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: query: show partitions partcoltypenum
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: show partitions partcoltypenum
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@partcoltypenum
+PREHOOK: query: -- change partition file location
+insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: query: -- change partition file location
+insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=100,sint=20000,bint=300000000000).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcoltypenum PARTITION(tint=100,sint=20000,bint=300000000000).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key decimal(10,0)
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint decimal(3,0)
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [100, 20000, 300000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 104
+ totalSize 114
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+#### A masked pattern was here ####
+PREHOOK: type: ALTERPARTITION_LOCATION
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERPARTITION_LOCATION
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcoltypenum
+POSTHOOK: query: describe formatted partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcoltypenum
+# col_name data_type comment
+
+key decimal(10,0)
+value string
+
+# Partition Information
+# col_name data_type comment
+
+tint decimal(3,0)
+sint smallint
+bint bigint
+
+# Detailed Partition Information
+Partition Value: [100, 20000, 300000000000]
+Database: default
+Table: partcoltypenum
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+#### A masked pattern was here ####
+ numFiles 1
+ numRows 10
+ rawDataSize 104
+ totalSize 114
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table partcoltypenum
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum
+POSTHOOK: query: drop table partcoltypenum
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum
+PREHOOK: query: -- some tests for configuration properties:
+-- hive.partition.check.column.type=false: do not enforce partition value type checking, conversion, or normalization
+-- hive.partition.check.old.column.type.in.rename=false: skip checking the partition values in the old partSpec, so rename can be used to correct legacy data
+drop table if exists partcoltypeothers
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- some tests for configuration properties:
+-- hive.partition.check.column.type=false: do not enforce partition value type checking, conversion, or normalization
+-- hive.partition.check.old.column.type.in.rename=false: skip checking the partition values in the old partSpec, so rename can be used to correct legacy data
+drop table if exists partcoltypeothers
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcoltypeothers (key int, value string) partitioned by (decpart decimal(6,2), datepart date)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcoltypeothers
+POSTHOOK: query: create table partcoltypeothers (key int, value string) partitioned by (decpart decimal(6,2), datepart date)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcoltypeothers
+PREHOOK: query: insert into partcoltypeothers partition (decpart = 1000.01BD, datepart = date '2015-4-13') select key, value from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcoltypeothers@decpart=1000.01BD/datepart=2015-4-13
+POSTHOOK: query: insert into partcoltypeothers partition (decpart = 1000.01BD, datepart = date '2015-4-13') select key, value from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcoltypeothers@decpart=1000.01BD/datepart=2015-4-13
+POSTHOOK: Lineage: partcoltypeothers PARTITION(decpart=1000.01BD,datepart=2015-4-13).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcoltypeothers PARTITION(decpart=1000.01BD,datepart=2015-4-13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions partcoltypeothers
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@partcoltypeothers
+POSTHOOK: query: show partitions partcoltypeothers
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@partcoltypeothers
+decpart=1000.01BD/datepart=2015-4-13
+PREHOOK: query: alter table partcoltypeothers partition(decpart = '1000.01BD', datepart = date '2015-4-13') rename to partition (decpart = 1000.01BD, datepart = date '2015-4-13')
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: default@partcoltypeothers
+PREHOOK: Output: default@partcoltypeothers@decpart=1000.01BD/datepart=2015-4-13
+POSTHOOK: query: alter table partcoltypeothers partition(decpart = '1000.01BD', datepart = date '2015-4-13') rename to partition (decpart = 1000.01BD, datepart = date '2015-4-13')
+POSTHOOK: type: ALTERTABLE_RENAMEPART
+POSTHOOK: Input: default@partcoltypeothers
+POSTHOOK: Input: default@partcoltypeothers@decpart=1000.01BD/datepart=2015-4-13
+POSTHOOK: Output: default@partcoltypeothers@decpart=1000.01/datepart=2015-04-13
+POSTHOOK: Output: default@partcoltypeothers@decpart=1000.01BD/datepart=2015-4-13
+PREHOOK: query: show partitions partcoltypeothers
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@partcoltypeothers
+POSTHOOK: query: show partitions partcoltypeothers
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@partcoltypeothers
+decpart=1000.01/datepart=2015-04-13
+PREHOOK: query: drop table partcoltypeothers
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcoltypeothers
+PREHOOK: Output: default@partcoltypeothers
+POSTHOOK: query: drop table partcoltypeothers
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcoltypeothers
+POSTHOOK: Output: default@partcoltypeothers