diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 4984138ef8..7d77cf54e0 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -394,6 +394,15 @@ public Table alterTable(String catName, String dbName, String name, Table newTab return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + String defaultPartName, byte[] exprBytes, String order, + short max_parts) throws MetaException, NoSuchObjectException { + + return objectStore.listPartitionNames(catName, db_name, tbl_name, + defaultPartName, exprBytes, order, max_parts); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, diff --git parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 2b865f3cf7..47acd009b1 100644 --- parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -1674,7 +1674,7 @@ showStatement | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWCOLUMNS tableName (TOK_FROM $db_name)? showStmtIdentifier?) | KW_SHOW KW_FUNCTIONS (KW_LIKE showFunctionIdentifier)? -> ^(TOK_SHOWFUNCTIONS KW_LIKE? showFunctionIdentifier?) - | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?) + | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? whereClause? orderByClause? limitClause? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec? whereClause? orderByClause? limitClause?) 
| KW_SHOW KW_CREATE ( (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) db_name=identifier -> ^(TOK_SHOW_CREATEDATABASE $db_name) | diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java index 2f659e6382..4bb27a55a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java @@ -21,9 +21,11 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -32,7 +34,15 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveTableName; +import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.type.ExprNodeTypeCheck; +import org.apache.hadoop.hive.ql.parse.type.TypeCheckCtx; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; /** * Analyzer for show partition commands. @@ -57,10 +67,74 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { inputs.add(new ReadEntity(table)); ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); + analyzeShowPartitionsConstraints(ast, table, desc); Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); rootTasks.add(task); task.setFetchSource(true); setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA)); } + + private void analyzeShowPartitionsConstraints(ASTNode ast, Table tab, + ShowPartitionsDesc showPartsDesc) throws SemanticException { + for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode)ast.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_LIMIT) { + short limit = Short.valueOf(((ASTNode)astChild.getChild(0)).getText()); + showPartsDesc.setLimit(limit); + } else { + RowResolver rwsch = new RowResolver(); + for (FieldSchema part_col : tab.getPartCols()) { + rwsch.put(tab.getTableName(), part_col.getName(), new ColumnInfo(part_col.getName(), + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true)); + } + TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch); + if (astChild.getType() == HiveParser.TOK_WHERE) { + ASTNode conds = (ASTNode) astChild.getChild(0); + Map nodeOutputs = ExprNodeTypeCheck.genExprNode(conds, tcCtx); + ExprNodeDesc desc = nodeOutputs.get(conds); + if (!(desc instanceof ExprNodeGenericFuncDesc) + || !((ExprNodeGenericFuncDesc) desc).getTypeInfo() + .equals(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME))) { + throw new SemanticException("Not a filter expr: " + (desc == null ? 
"null" : desc.getExprString())); + } + showPartsDesc.setCond(desc); + } + + if (astChild.getType() == HiveParser.TOK_ORDERBY) { + StringBuilder colIndices = new StringBuilder(); + StringBuilder order = new StringBuilder(); + int ccount = astChild.getChildCount(); + for (int i = 0; i < ccount; ++i) { + ASTNode cl = (ASTNode) astChild.getChild(i); + if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { + order.append("+"); + cl = (ASTNode) cl.getChild(0).getChild(0); + } else if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEDESC) { + order.append("-"); + cl = (ASTNode) cl.getChild(0).getChild(0); + } else { + order.append("+"); + } + Map nodeOutputs = ExprNodeTypeCheck.genExprNode(cl, tcCtx); + ExprNodeDesc desc = nodeOutputs.get(cl); + if (!(desc instanceof ExprNodeColumnDesc)) { + throw new SemanticException("Only partition keys are allowed for " + + "ordering partition names, input: " + cl.toStringTree()); + } + String _col = ((ExprNodeColumnDesc) desc).getColumn(); + for (int k = 0; k < tab.getPartCols().size(); k++) { + String column = tab.getPartCols().get(k).getName(); + if (_col.equalsIgnoreCase(column)) { + colIndices.append(k).append(","); + break; + } + } + } + colIndices.setLength(colIndices.length() - 1); + showPartsDesc.setOrder(colIndices.append(":").append(order).toString()); + } + } + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java index eeef253af8..58acd1c66d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * DDL task description for SHOW PARTITIONS commands. 
@@ -38,6 +39,9 @@ private final String tabName; private final String resFile; private final Map<String, String> partSpec; + private short limit = -1; + private String order; + private ExprNodeDesc cond; public ShowPartitionsDesc(String tabName, Path resFile, Map<String, String> partSpec) { this.tabName = tabName; @@ -59,4 +63,35 @@ public String getTabName() { public String getResFile() { return resFile; } + + public void setLimit(short limit) { + this.limit = limit; + } + + public void setOrder(String order) { + this.order = order; + } + + public void setCond(ExprNodeDesc cond) { + this.cond = cond; + } + + @Explain(displayName = "limit", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public short getLimit() { + return limit; + } + + @Explain(displayName = "order", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getOrder() { + return order; + } + + public ExprNodeDesc getCond() { + return cond; + } + + @Explain(displayName = "cond", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getFilterStr() { + return cond == null ? null : cond.getExprString(); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java index 2b1a002748..4ca1b6ae14 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java @@ -19,15 +19,31 @@ package org.apache.hadoop.hive.ql.ddl.table.partition.show; import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; /** * Operation process of showing the partitions of a table.
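[Reviewer note] The operation hunk below folds an explicit PARTITION spec into the pushed-down predicate before calling the metastore. A minimal sketch of that merge, reusing the hypothetical mpart2 table:

// Illustration only -- not part of the patch.
//   SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20;
// Each PARTITION (key = value) pair becomes an equality node built via
//   ExprNodeGenericFuncDesc.newInstance(FunctionRegistry.getFunctionInfo("=").getGenericUDF(), children)
// after converting the string value to the partition key's type, and is ANDed with
// the WHERE condition:
//   pred = ExprNodeDescUtils.mergePredicates((rs = 'AS'), (ds = '1980-11-10' and hs >= 20))
// before Hive.getPartitionNames(tbl, pred, order, limit) serializes it with Kryo.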
@@ -44,11 +60,48 @@ public int execute() throws HiveException { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, desc.getTabName()); } - List parts = null; - if (desc.getPartSpec() != null) { - parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getPartSpec(), (short) -1); + List parts; + if (desc.getCond() != null || desc.getOrder() != null) { + ExprNodeDesc pred = desc.getCond(); + if (desc.getPartSpec() != null) { + List fieldSchemas = tbl.getPartitionKeys(); + Map infoMap = new HashMap(); + for (FieldSchema schema : fieldSchemas) { + ColumnInfo info = new ColumnInfo(schema.getName(), + TypeInfoFactory.getPrimitiveTypeInfo(schema.getType()), null, true); + infoMap.put(schema.getName().toLowerCase(), info); + } + for (Map.Entry entry : desc.getPartSpec().entrySet()) { + ColumnInfo part_col = infoMap.get(entry.getKey().toLowerCase()); + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + Object part_val = entry.getValue(); + if (!part_col.getType().equals(stringTypeInfo)) { + part_val = ObjectInspectorConverters.getConverter( + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(stringTypeInfo), + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(part_col.getType())) + .convert(part_val); + if (part_val == null) { + throw new HiveException("Cannot convert to " + part_col.getType() + " from: " + + stringTypeInfo + ", value: " + entry.getValue()); + } + } + List children = new ArrayList(); + children.add(new ExprNodeColumnDesc(part_col)); + children.add(new ExprNodeConstantDesc(part_col.getType(), part_val)); + ExprNodeDesc exprNodeDesc = ExprNodeGenericFuncDesc.newInstance( + FunctionRegistry.getFunctionInfo("=").getGenericUDF(), + children); + pred = (pred == null) ? 
exprNodeDesc : + ExprNodeDescUtils.mergePredicates(exprNodeDesc, pred); + } + } + + parts = context.getDb().getPartitionNames(tbl, + (ExprNodeGenericFuncDesc) pred, desc.getOrder(), desc.getLimit()); + } else if (desc.getPartSpec() != null) { + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getPartSpec(), desc.getLimit()); } else { - parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1); + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getLimit()); } // write the results in the file diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 1f9fb3b897..2c4be1bac4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -3600,6 +3600,26 @@ public boolean dropPartition(String dbName, String tableName, List parti return names; } + public List getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, String order, + short max_parts) throws HiveException { + List names = null; + byte[] exprBytes = {(byte)-1}; + if (expr != null) { + exprBytes = SerializationUtilities.serializeExpressionToKryo(expr); + } + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + try { + names = getMSC().listPartitionNames(tbl.getCatalogName(), tbl.getDbName(), + tbl.getTableName(), defaultPartitionName, exprBytes, order, max_parts); + } catch (NoSuchObjectException nsoe) { + return Lists.newArrayList(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + return names; + } + /** * get all the partitions that the table has * diff --git ql/src/test/queries/clientpositive/show_partitions2.q ql/src/test/queries/clientpositive/show_partitions2.q new file mode 100644 index 0000000000..703d051247 --- /dev/null +++ ql/src/test/queries/clientpositive/show_partitions2.q @@ -0,0 +1,50 @@ +CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE); +ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09'); +ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-10'); +ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11'); +ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19'); + +SHOW PARTITIONS mpart1; +SHOW PARTITIONS mpart1 PARTITION (ds = '1980-11-19'); +SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20'; +SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2; +SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2; +SHOW PARTITIONS mpart1 LIMIT 2; +SHOW PARTITIONS mpart1 ORDER BY ds DESC; + +CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs int, rs string); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS'); +ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS'); + +CREATE TABLE 
srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING); +INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA'); + +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1; + +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10'; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4; +SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20; +SHOW PARTITIONS mpart2 WHERE hs > 9 and hs < 19 ORDER BY hs DESC, ds; + +SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC; +SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC; +SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC; +SHOW PARTITIONS mpart2 ORDER BY hs DESC LIMIT 3; + +SHOW PARTITIONS mpart2 limit 3; +SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3; \ No newline at end of file diff --git ql/src/test/results/clientpositive/show_partitions2.q.out ql/src/test/results/clientpositive/show_partitions2.q.out new file mode 100644 index 0000000000..0bd8c230c2 --- /dev/null +++ ql/src/test/results/clientpositive/show_partitions2.q.out @@ -0,0 +1,387 @@ +PREHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart1 +POSTHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart1 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-10') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-10') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-11 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09 +ds=1980-11-10 +ds=1980-11-11 +ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 
PARTITION (ds = '1980-11-19') +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION (ds = '1980-11-19') +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-11 +ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-19 +ds=1980-11-11 +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10 +PREHOOK: query: SHOW PARTITIONS mpart1 LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09 +ds=1980-11-10 +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-19 +ds=1980-11-11 +ds=1980-11-10 +ds=1980-11-09 +PREHOOK: query: CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs int, rs string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart2 +POSTHOOK: query: CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs int, rs string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart2 +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-09/hs=17/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-09/hs=19/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-09/hs=22/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +POSTHOOK: type: 
ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=12/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=16/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=20/rs=AF +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=16/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=22/rs=AS +PREHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart1 +POSTHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart1 +PREHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@srcpart1 +POSTHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 
'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@srcpart1 +POSTHOOK: Lineage: srcpart1.ds SCRIPT [] +POSTHOOK: Lineage: srcpart1.hs SCRIPT [] +POSTHOOK: Lineage: srcpart1.key1 SCRIPT [] +POSTHOOK: Lineage: srcpart1.rs SCRIPT [] +POSTHOOK: Lineage: srcpart1.value1 SCRIPT [] +PREHOOK: query: INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart1 +PREHOOK: Output: default@mpart2 +POSTHOOK: query: INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart1 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=21/rs=NA +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=12/rs=AS +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart2@ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=21,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=21,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=12,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=12,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hs=__HIVE_DEFAULT_PARTITION__,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hs=__HIVE_DEFAULT_PARTITION__,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 
+POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=21/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE hs > 9 and hs < 19 ORDER BY hs DESC, ds +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE hs > 9 and hs < 19 ORDER BY hs DESC, ds +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-11/hs=22/rs=AS +ds=1980-11-09/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU 
+ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-11/hs=22/rs=AS +ds=1980-11-09/hs=22/rs=AS +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-11/hs=22/rs=AS +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-09/hs=22/rs=AS +ds=1980-11-09/hs=19/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 limit 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 limit 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-09/hs=22/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-10/hs=15/rs=EU diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 205c867db1..dfe09b473f 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2343,6 +2343,27 @@ public boolean tableExists(String catName, String dbName, String tableName) thro isClientFilterEnabled, filterHook, catName, db_name, tbl_name, partNames); } + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + String defaultPartName, byte[] exprBytes, String order, short max_parts) + throws MetaException, TException, NoSuchObjectException { + + PartitionsByExprRequest req = new PartitionsByExprRequest( + db_name, tbl_name, ByteBuffer.wrap(exprBytes)); + if (defaultPartName != null) { + req.setDefaultPartitionName(defaultPartName); + } + if (max_parts >= 0) { + req.setMaxParts(max_parts); + } + if (order != null) { + req.setOrder(order); + } + req.setCatName(catName); + return FilterUtils.filterPartitionNamesIfEnabled(isClientFilterEnabled, filterHook, catName, + db_name, tbl_name, client.get_partition_names_req(req)); + } + @Override public int getNumPartitionsByFilter(String db_name, String tbl_name, 
String filter) throws TException { diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b58b1e4a07..80c71a13bb 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -1212,6 +1212,24 @@ PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tabl List part_vals, int max_parts) throws MetaException, TException, NoSuchObjectException; + /** + * Get a list of partition names matching the specified filter, returned in the requested order. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param defaultPartName default partition name. + * @param exprBytes partition filter expression, serialized from an ExprNodeDesc. + * @param order how the matching partition names should be ordered. + * @param max_parts maximum number of partition names to return, or -1 to return all that are + * found. + * @return list of matching partition names. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws NoSuchObjectException no such table. + */ + List<String> listPartitionNames(String catName, String db_name, String tbl_name, + String defaultPartName, byte[] exprBytes, String order, short max_parts) + throws MetaException, TException, NoSuchObjectException; + /** * Get a list of partition values * @param request request diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 098ddec5dc..82835e6d68 100644 --- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -763,8 +763,9 @@ struct PartitionsByExprRequest { 2: required string tblName, 3: required binary expr, 4: optional string defaultPartitionName, - 5: optional i16 maxParts=-1 - 6: optional string catName + 5: optional i16 maxParts=-1, + 6: optional string catName, + 7: optional string order } struct TableStatsResult { @@ -2238,6 +2239,9 @@ service ThriftHiveMetastore extends fb303.FacebookService 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) + list<string> get_partition_names_req(1:PartitionsByExprRequest req) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + // get the partitions matching the given partition filter list get_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter, 4:i16 max_parts=-1) diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 77d34047a4..079feb313b 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -6220,6 +6220,31 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n return ret; } + @Override + public List<String> get_partition_names_req(PartitionsByExprRequest req) + throws MetaException, NoSuchObjectException, TException { + String catName = req.isSetCatName() ?
req.getCatName() : getDefaultCatalog(conf); + String db_name = req.getDbName(), tbl_name = req.getTblName(); + startTableFunction("get_partition_names_req", catName, + db_name, tbl_name); + fireReadTablePreEvent(catName, db_name, tbl_name); + List<String> ret = null; + Exception ex = null; + try { + authorizeTableForPartitionMetadata(catName, db_name, tbl_name); + ret = getMS().listPartitionNames(catName, db_name, tbl_name, + req.getDefaultPartitionName(), req.getExpr(), req.getOrder(), req.getMaxParts()); + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, db_name, tbl_name, ret); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partition_names_req", ret != null, ex, tbl_name); + } + return ret; + } + @Override public List partition_name_to_vals(String part_name) throws TException { if (part_name.length() == 0) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index d1558876f1..7445a5e960 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -32,6 +32,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -496,6 +497,140 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ queryParams, pms.toArray(), queryText); } + public List<String> getPartitionNamesViaSql(SqlFilterForPushdown filter, + String defaultPartName, String order, Integer max) throws MetaException { + String catName = filter.table.isSetCatName() ? filter.table.getCatName() : + DEFAULT_CATALOG_NAME; + if (filter.joins.isEmpty()) { + int psize = filter.table.getPartitionKeysSize(); + for (int i = 0; i < psize; i++) { + filter.joins.add(null); + } + } + Map<Integer, String[]> orderMap = new LinkedHashMap<>(); + String[] parts; + if (order != null && (parts = order.split(":")).length == 2) { + String[] poses = parts[0].split(","); + if (poses.length != parts[1].length()) { + throw new MetaException("The number of partition key indices and sort directions" + + " do not match, order: " + order); + } + + for (int i = 0; i < poses.length; i++) { + int pos = Integer.valueOf(poses[i]); + String sort = ('+' == parts[1].charAt(i)) ?
"ASC" : "DESC"; + FieldSchema partitionKey = filter.table.getPartitionKeys().get(pos); + orderMap.put(pos, new String[]{sort, partitionKey.getType()}); + } + } + boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType); + + return getPartitionNamesViaSqlInternal(catName, + filter.table.getDbName(), filter.table.getTableName(), filter.filter, filter.params, + filter.joins, defaultPartName, orderMap, max, dbHasJoinCastBug); + } + + private List getPartitionNamesViaSqlInternal(String catName, String dbName, String tblName, + String sqlFilter, List paramsForFilter, List joins, String defaultPartName, + Map orderMap, Integer max, boolean dbHasJoinCastBug) throws MetaException { + StringBuilder orderColumns = new StringBuilder(), orderClause = new StringBuilder(); + int i = 0; + List paramsForOrder = new ArrayList(); + for (Map.Entry entry : orderMap.entrySet()) { + int partColIndex = entry.getKey(); + String orderAlias = "ODR" + (i++); + String tableValue, tableAlias; + if (joins.get(partColIndex) == null) { + tableAlias = "ORDER" + partColIndex; + joins.set(partColIndex, "inner join " + PARTITION_KEY_VALS + " \"" + tableAlias + + "\" on \"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + + " and \"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex); + tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" "; + } else { + tableAlias = "FILTER" + partColIndex; + tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" "; + } + + String tableColumn = tableValue; + PartitionFilterGenerator.FilterType type = + PartitionFilterGenerator.FilterType.fromType(entry.getValue()[1]); + if (type == PartitionFilterGenerator.FilterType.Date) { + if (dbType == DatabaseProduct.ORACLE) { + tableValue = "TO_DATE(" + tableValue + ", 'YYYY-MM-DD')"; + } else { + tableValue = "cast(" + tableValue + " as date)"; + } + } else if (type == PartitionFilterGenerator.FilterType.Integral) { + tableValue = "CAST(" + tableColumn + " AS decimal(21,0))"; + } + String tableValue0 = tableValue; + tableValue = "(case when " + tableColumn + " <> ?"; + paramsForOrder.add(defaultPartName); + if (dbHasJoinCastBug) { + tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + + DBS + ".\"CTLG_NAME\" = ? and " + + "\"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and " + + "\"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex); + paramsForOrder.add(tblName.toLowerCase()); + paramsForOrder.add(dbName.toLowerCase()); + paramsForOrder.add(catName.toLowerCase()); + } + tableValue += " then " + tableValue0 + " else null end) AS \"" + orderAlias + "\""; + orderColumns.append(tableValue).append(","); + orderClause.append("\"").append(orderAlias).append("\" ") + .append(entry.getValue()[0]).append(","); + } + + for (int j = 0; j < joins.size(); j++) { + if (joins.get(j) == null) { + joins.remove(j--); + } + } + if (orderClause.length() > 0) { + orderClause.setLength(orderClause.length() - 1); + orderColumns.setLength(orderColumns.length() - 1); + } + + String orderCls = " order by " + + (orderClause.length() > 0 ? orderClause.toString() : "\"PART_NAME\" asc"); + String columns = orderColumns.length() > 0 ? ", " + orderColumns.toString() : ""; + String queryText = + "select " + PARTITIONS + ".\"PART_NAME\"" + columns + " from " + PARTITIONS + "" + + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" " + + " and " + TBLS + ".\"TBL_NAME\" = ? 
" + + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + + " and " + DBS + ".\"NAME\" = ? " + + join(joins, ' ') + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderCls; + + Object[] params = new Object[paramsForFilter.size() + paramsForOrder.size() + 3]; + for (i = 0; i < paramsForOrder.size(); i++) { + params[i] = paramsForOrder.get(i); + } + params[i] = tblName; + params[i+1] = dbName; + params[i+2] = catName; + for (int j = 0; j < paramsForFilter.size(); j++) { + params[i + j + 3] = paramsForFilter.get(j); + } + + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + List partNames = new LinkedList(); + max = (max == null ? -1 : max); + try { + List sqlResult = executeWithArray(query, params, queryText, max); + for (Object result : sqlResult) { + String partName = !columns.isEmpty() ? + String.valueOf(((Object[]) result)[0]) : String.valueOf(result); + partNames.add(partName); + } + } finally { + query.closeAll(); + } + return partNames; + } + /** * Gets partitions by using direct SQL queries. * @param catName Metastore catalog name. @@ -648,6 +783,15 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ private final List joins = new ArrayList<>(); private String filter; private Table table; + // Should remove null values in joins + private boolean compactJoins; + SqlFilterForPushdown() { + this.compactJoins = true; + } + public SqlFilterForPushdown(Table table, boolean compactJoins) { + this.table = table; + this.compactJoins = compactJoins; + } } public boolean generateSqlFilterForPushdown( @@ -662,7 +806,7 @@ public boolean generateSqlFilterForPushdown(Table table, ExpressionTree tree, St result.table = table; result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params, result.joins, dbHasJoinCastBug, ((defaultPartitionName == null) ? 
defaultPartName : defaultPartitionName), - dbType, schema); + dbType, schema, result.compactJoins); return result.filter != null; } @@ -1031,7 +1175,7 @@ private PartitionFilterGenerator(Table table, List params, List */ private static String generateSqlFilter(Table table, ExpressionTree tree, List params, List joins, boolean dbHasJoinCastBug, String defaultPartName, - DatabaseProduct dbType, String schema) throws MetaException { + DatabaseProduct dbType, String schema, boolean compactJoins) throws MetaException { assert table != null; if (tree == null) { // consistent with other APIs like makeExpressionTree, null is returned to indicate that @@ -1050,9 +1194,11 @@ private static String generateSqlFilter(Table table, ExpressionTree tree, List listPartitionNames(final String catName, final String db_name, final String tbl_name, + final String defaultPartName, final byte[] exprBytes, + final String order, final short max_parts) throws MetaException, NoSuchObjectException { + + return new GetListHelper(catName, db_name, tbl_name, true, false) { + private List getPartitionNamesByExprNoTxn(Table table) throws MetaException { + SqlFilterForPushdown filter = new SqlFilterForPushdown(table, false); + List result = directSql.getPartitionNamesViaSql(filter, defaultPartName, order, -1); + expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), exprBytes, + getDefaultPartitionName(defaultPartName), result); + if (max_parts >=0 && result.size() > max_parts) { + result = result.subList(0, max_parts); + } + return result; + } + @Override + protected List getSqlResult(GetHelper> ctx) throws MetaException { + SqlFilterForPushdown filter = new SqlFilterForPushdown(ctx.getTable(),false); + if (exprBytes.length == 1 && exprBytes[0] == -1) { + return directSql.getPartitionNamesViaSql(filter, defaultPartName, order, (int)max_parts); + } + + List partNames = null; + final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, exprBytes, + getDefaultPartitionName(defaultPartName)); + // If we have some sort of expression tree, try SQL filter pushdown. + if (exprTree != null) { + if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, defaultPartName, filter)) { + partNames = directSql.getPartitionNamesViaSql(filter, defaultPartName, order, (int)max_parts); + } + } + if (partNames == null) { + partNames = getPartitionNamesByExprNoTxn(ctx.getTable()); + } + return partNames; + } + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, NoSuchObjectException { + return Collections.emptyList(); + } + }.run(true); + } + private String extractPartitionKey(FieldSchema key, List pkeys) { StringBuilder buffer = new StringBuilder(256); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c334421adf..f54e5719ce 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -482,6 +482,19 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException; + /** + * Get a partial or complete list of names for partitions of a table. + * @param catName catalog name. + * @param db_name database name. 
+ * @param tbl_name table name. + * @param defaultPartName default partition name. + * @param exprBytes partition filter expression, serialized from an ExprNodeDesc. + * @param order how the matching partition names should be ordered. + * @param max_parts maximum number of partitions to retrieve, -1 for all. + * @return list of partition names. + * @throws MetaException there was an error accessing the RDBMS + * @throws NoSuchObjectException no such table. + */ + List<String> listPartitionNames(String catName, String db_name, String tbl_name, + String defaultPartName, byte[] exprBytes, String order, + short max_parts) throws MetaException, NoSuchObjectException; + /** * Get a list of partition values as one big struct. * @param catName catalog name. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index f31cc5d7a1..985c238269 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1518,6 +1518,13 @@ private void validateTableType(Table tbl) { return partitionNames; } + @Override + public List<String> listPartitionNames(String catName, String db_name, String tbl_name, String defaultPartName, + byte[] exprBytes, String order, short max_parts) throws MetaException, NoSuchObjectException { + + throw new UnsupportedOperationException(); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f7032b93d1..2565a24301 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } + @Override + public List<String> listPartitionNames(String catName, String db_name, String tbl_name, String defaultPartName, + byte[] exprBytes, String order, short max_parts) throws MetaException, NoSuchObjectException { + return objectStore.listPartitionNames(catName, db_name, tbl_name, + defaultPartName, exprBytes, order, max_parts); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index bea7e9572b..d05037a8ec 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena return Collections.emptyList(); } + @Override + public List<String> listPartitionNames(String catName, String db_name, String tbl_name,
String defaultPartName, + byte[] exprBytes, String order, short max_parts) throws MetaException, NoSuchObjectException { + + return Collections.emptyList(); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index fc071f9a20..7ea6283ad0 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -1416,6 +1416,14 @@ public Partition getPartition(String db_name, String tbl_name, return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + String defaultPartName, byte[] exprBytes, String order, + short max_parts) throws MetaException, TException, NoSuchObjectException { + + throw new UnsupportedOperationException(); + } + @Override public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) throws MetaException, TException, NoSuchObjectException {
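[Reviewer note] An end-to-end usage sketch of the new client API. The client handle and filter expression are assumed to already exist; the literal values are illustrative, not part of the patch:

// Illustration only: assumes an IMetaStoreClient msc and a boolean partition
// filter expr (an ExprNodeGenericFuncDesc such as (hs >= 20)).
byte[] exprBytes = SerializationUtilities.serializeExpressionToKryo(expr);
List<String> names = msc.listPartitionNames(
    "hive",                          // catalog name (the metastore default)
    "default", "mpart2",             // database / table
    "__HIVE_DEFAULT_PARTITION__",    // default partition name
    exprBytes,
    "1:-",                           // ORDER BY hs DESC: key index 1, descending
    (short) 4);                      // LIMIT 4; -1 returns all matches
// The call maps to the new get_partition_names_req Thrift method, answered by
// MetaStoreDirectSql.getPartitionNamesViaSql with ordering and limit applied server side.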