diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 4984138ef8..7e0ce0734b 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -394,6 +394,15 @@ public Table alterTable(String catName, String dbName, String name, Table newTab
     return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
   }
 
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tblName,
+      String defaultPartName, byte[] exprBytes, String order,
+      short maxParts) throws MetaException, NoSuchObjectException {
+
+    return objectStore.listPartitionNames(catName, dbName, tblName,
+        defaultPartName, exprBytes, order, maxParts);
+  }
+
   @Override
   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
                                                      String tbl_name, List<FieldSchema> cols,
diff --git parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 2b865f3cf7..47acd009b1 100644
--- parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1674,7 +1674,7 @@ showStatement
     | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWCOLUMNS tableName (TOK_FROM $db_name)? showStmtIdentifier?)
     | KW_SHOW KW_FUNCTIONS (KW_LIKE showFunctionIdentifier)? -> ^(TOK_SHOWFUNCTIONS KW_LIKE? showFunctionIdentifier?)
-    | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?)
+    | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? whereClause? orderByClause? limitClause? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec? whereClause? orderByClause? limitClause?)
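The three new clauses attach as extra optional children of TOK_SHOWPARTITIONS, so the analyzer can pick up TOK_WHERE, TOK_ORDERBY and TOK_LIMIT independently. A quick parse check, as a sketch only (it assumes ParseDriver#parse(String) returns an ASTNode; the class name is made up):

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;

public class ShowPartitionsParseCheck {
  public static void main(String[] args) throws Exception {
    ParseDriver pd = new ParseDriver();
    // All three new clauses are optional and may be combined with PARTITION (...).
    ASTNode tree = pd.parse("SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') "
        + "WHERE ds = '1980-11-10' AND hs >= 20 ORDER BY hs DESC LIMIT 4");
    // Expected children of TOK_SHOWPARTITIONS: the table name, TOK_PARTSPEC,
    // TOK_WHERE, TOK_ORDERBY and TOK_LIMIT, in that order.
    System.out.println(tree.dump());
  }
}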
    | KW_SHOW KW_CREATE (
        (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) db_name=identifier -> ^(TOK_SHOW_CREATEDATABASE $db_name)
        |
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java
index 2f659e6382..ff95872321 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java
@@ -21,9 +21,11 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
 import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -32,7 +34,15 @@
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveTableName;
+import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.type.ExprNodeTypeCheck;
+import org.apache.hadoop.hive.ql.parse.type.TypeCheckCtx;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
  * Analyzer for show partition commands.
@@ -57,10 +67,75 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
     inputs.add(new ReadEntity(table));
 
     ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec);
+    analyzeShowPartitionsConstraints(ast, table, desc);
     Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
     rootTasks.add(task);
 
     task.setFetchSource(true);
     setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA));
   }
+
+  private void analyzeShowPartitionsConstraints(ASTNode ast, Table tab,
+      ShowPartitionsDesc showPartsDesc) throws SemanticException {
+    for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
+      ASTNode astChild = (ASTNode) ast.getChild(childIndex);
+      if (astChild.getType() == HiveParser.TOK_LIMIT) {
+        short limit = Short.valueOf(((ASTNode) astChild.getChild(0)).getText());
+        showPartsDesc.setLimit(limit);
+      } else {
+        RowResolver rwsch = new RowResolver();
+        for (FieldSchema partCol : tab.getPartCols()) {
+          rwsch.put(tab.getTableName(), partCol.getName(), new ColumnInfo(partCol.getName(),
+              TypeInfoFactory.getPrimitiveTypeInfo(partCol.getType()), null, true));
+        }
+        TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch);
+        if (astChild.getType() == HiveParser.TOK_WHERE) {
+          ASTNode conds = (ASTNode) astChild.getChild(0);
+          // @TODO default partition
+          Map<ASTNode, ExprNodeDesc> nodeOutputs = ExprNodeTypeCheck.genExprNode(conds, tcCtx);
+          ExprNodeDesc desc = nodeOutputs.get(conds);
+          if (!(desc instanceof ExprNodeGenericFuncDesc)
+              || !((ExprNodeGenericFuncDesc) desc).getTypeInfo()
+                  .equals(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME))) {
+            throw new SemanticException("Not a filter expr: " + (desc == null ?
+                "null" : desc.getExprString()));
+          }
+          showPartsDesc.setCond(desc);
+        }
+
+        if (astChild.getType() == HiveParser.TOK_ORDERBY) {
+          StringBuilder colIndices = new StringBuilder();
+          StringBuilder order = new StringBuilder();
+          int ccount = astChild.getChildCount();
+          for (int i = 0; i < ccount; ++i) {
+            ASTNode cl = (ASTNode) astChild.getChild(i);
+            if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) {
+              order.append("+");
+              cl = (ASTNode) cl.getChild(0).getChild(0);
+            } else if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEDESC) {
+              order.append("-");
+              cl = (ASTNode) cl.getChild(0).getChild(0);
+            } else {
+              order.append("+");
+            }
+            Map<ASTNode, ExprNodeDesc> nodeOutputs = ExprNodeTypeCheck.genExprNode(cl, tcCtx);
+            ExprNodeDesc desc = nodeOutputs.get(cl);
+            if (!(desc instanceof ExprNodeColumnDesc)) {
+              throw new SemanticException("Only partition keys are allowed for "
+                  + "ordering partition names, input: " + cl.toStringTree());
+            }
+            String col = ((ExprNodeColumnDesc) desc).getColumn();
+            for (int k = 0; k < tab.getPartCols().size(); k++) {
+              String column = tab.getPartCols().get(k).getName();
+              if (col.equalsIgnoreCase(column)) {
+                colIndices.append(k).append(",");
+                break;
+              }
+            }
+          }
+          colIndices.setLength(colIndices.length() - 1);
+          showPartsDesc.setOrder(colIndices.append(":").append(order).toString());
+        }
+      }
+    }
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java
index eeef253af8..23a4bb1bf1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hive.ql.ddl.DDLDesc;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 
 /**
  * DDL task description for SHOW PARTITIONS commands.
@@ -38,6 +39,9 @@
   private final String tabName;
   private final String resFile;
   private final Map<String, String> partSpec;
+  private short limit = -1;
+  private String order;
+  private ExprNodeDesc cond;
 
   public ShowPartitionsDesc(String tabName, Path resFile, Map<String, String> partSpec) {
     this.tabName = tabName;
@@ -59,4 +63,39 @@ public String getTabName() {
   public String getResFile() {
     return resFile;
   }
+
+  public void setLimit(short limit) {
+    this.limit = limit;
+  }
+
+  public void setOrder(String order) {
+    this.order = order;
+  }
+
+  public void setCond(ExprNodeDesc cond) {
+    this.cond = cond;
+  }
+
+  public short getLimit() {
+    return limit;
+  }
+
+  @Explain(displayName = "limit", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public int getLimitExplain() {
+    return limit;
+  }
+
+  @Explain(displayName = "order", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getOrder() {
+    return order;
+  }
+
+  public ExprNodeDesc getCond() {
+    return cond;
+  }
+
+  @Explain(displayName = "condition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getFilterStr() {
+    return cond == null ? null : cond.getExprString();
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
index 2b1a002748..3255417aa5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
@@ -19,15 +19,31 @@
 package org.apache.hadoop.hive.ql.ddl.table.partition.show;
 
 import java.io.DataOutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
 /**
  * Operation process of showing the partitions of a table.
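In the execute() hunk below, an explicit PARTITION (...) spec is folded into the pushed-down filter whenever a WHERE condition or ORDER BY is present, so PARTITION (rs = 'AS') WHERE ds = '1980-11-10' behaves like WHERE rs = 'AS' AND ds = '1980-11-10'. The construction is roughly the following sketch (whereCond stands for the analyzed WHERE expression; the wrapper class and method are hypothetical):

import java.util.Arrays;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartSpecFoldSketch {
  static ExprNodeDesc foldPartSpec(ExprNodeDesc whereCond) throws Exception {
    // rs = 'AS' from the PARTITION spec, rebuilt as an equality predicate.
    ExprNodeDesc col = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rs", null, true);
    ExprNodeDesc val = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "AS");
    ExprNodeDesc eq = ExprNodeGenericFuncDesc.newInstance(
        FunctionRegistry.getFunctionInfo("=").getGenericUDF(), Arrays.asList(col, val));
    // AND-merge with the WHERE condition, if any.
    return whereCond == null ? eq : ExprNodeDescUtils.mergePredicates(eq, whereCond);
  }
}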
@@ -44,11 +60,49 @@ public int execute() throws HiveException {
       throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, desc.getTabName());
     }
 
-    List<String> parts = null;
-    if (desc.getPartSpec() != null) {
-      parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getPartSpec(), (short) -1);
+    List<String> parts;
+    if (desc.getCond() != null || desc.getOrder() != null) {
+      ExprNodeDesc pred = desc.getCond();
+      if (desc.getPartSpec() != null) {
+        List<FieldSchema> fieldSchemas = tbl.getPartitionKeys();
+        Map<String, ColumnInfo> infoMap = new HashMap<String, ColumnInfo>();
+        for (FieldSchema schema : fieldSchemas) {
+          ColumnInfo info = new ColumnInfo(schema.getName(),
+              TypeInfoFactory.getPrimitiveTypeInfo(schema.getType()), null, true);
+          infoMap.put(schema.getName().toLowerCase(), info);
+        }
+        for (Map.Entry<String, String> entry : desc.getPartSpec().entrySet()) {
+          ColumnInfo partCol = infoMap.get(entry.getKey().toLowerCase());
+          TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME);
+          Object partVal = entry.getValue();
+          if (!partCol.getType().equals(stringTypeInfo)) {
+            partVal = ObjectInspectorConverters.getConverter(
+                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(stringTypeInfo),
+                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(partCol.getType()))
+                .convert(partVal);
+            if (partVal == null) {
+              throw new HiveException("Cannot convert to " + partCol.getType() + " from: "
+                  + stringTypeInfo + ", value: " + entry.getValue());
+            }
+          }
+          List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
+          children.add(new ExprNodeColumnDesc(partCol));
+          children.add(new ExprNodeConstantDesc(partCol.getType(), partVal));
+          ExprNodeDesc exprNodeDesc = ExprNodeGenericFuncDesc.newInstance(
+              FunctionRegistry.getFunctionInfo("=").getGenericUDF(),
+              children);
+          pred = (pred == null) ? exprNodeDesc :
+              ExprNodeDescUtils.mergePredicates(exprNodeDesc, pred);
+        }
+      }
+
+      parts = context.getDb().getPartitionNames(tbl, (ExprNodeGenericFuncDesc) pred,
+          desc.getOrder(), desc.getLimit());
+    } else if (desc.getPartSpec() != null) {
+      parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(),
+          desc.getPartSpec(), desc.getLimit());
     } else {
-      parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1);
+      parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getLimit());
     }
 
     // write the results in the file
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 1f9fb3b897..bbc16a50f3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -3600,6 +3600,26 @@ public boolean dropPartition(String dbName, String tableName, List<String> parti
     return names;
   }
 
+  public List<String> getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, String order,
+      short maxParts) throws HiveException {
+    List<String> names = null;
+    byte[] exprBytes = {(byte) -1};
+    if (expr != null) {
+      exprBytes = SerializationUtilities.serializeExpressionToKryo(expr);
+    }
+    String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
+    try {
+      names = getMSC().listPartitionNames(tbl.getCatalogName(), tbl.getDbName(),
+          tbl.getTableName(), defaultPartitionName, exprBytes, order, maxParts);
+    } catch (NoSuchObjectException nsoe) {
+      return Lists.newArrayList();
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+    return names;
+  }
+
   /**
    * get all the partitions that the table has
    *
diff --git ql/src/test/queries/clientpositive/show_partitions2.q ql/src/test/queries/clientpositive/show_partitions2.q
new file mode 100644
index 0000000000..e484f4ba46
--- /dev/null
+++ ql/src/test/queries/clientpositive/show_partitions2.q
@@ -0,0 +1,53 @@
+CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE);
+ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09');
+ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-10');
+ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11');
+ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19');
+
+SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20';
+SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2;
+SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2;
+SHOW PARTITIONS mpart1 LIMIT 2;
+SHOW PARTITIONS mpart1 ORDER BY ds DESC;
+
+CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs INT, rs STRING);
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS');
+ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS');
+
+CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING);
+INSERT INTO
TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA'); + +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1; + +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10'; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4; +SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4; +SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20; +SHOW PARTITIONS mpart2 WHERE hs > 9 AND hs < 19 ORDER BY hs DESC, ds; + +SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC; +SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC; +SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC; + +SHOW PARTITIONS mpart2 LIMIT 3; +SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3; + +EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20; +EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4; +EXPLAIN SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20; +EXPLAIN SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC; +EXPLAIN SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3; \ No newline at end of file diff --git ql/src/test/results/clientpositive/drop_partitions_filter.q.out ql/src/test/results/clientpositive/drop_partitions_filter.q.out index edfbbf7127..5e7da876e1 100644 --- ql/src/test/results/clientpositive/drop_partitions_filter.q.out +++ ql/src/test/results/clientpositive/drop_partitions_filter.q.out @@ -155,6 +155,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: ptestfilter_n1 Stage: Stage-1 diff --git ql/src/test/results/clientpositive/show_partitions.q.out ql/src/test/results/clientpositive/show_partitions.q.out index 0a73374987..feabfcf366 100644 --- ql/src/test/results/clientpositive/show_partitions.q.out +++ ql/src/test/results/clientpositive/show_partitions.q.out @@ -128,6 +128,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 partSpec: hr 11 table: default.srcpart @@ -159,6 +160,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 partSpec: ds 2008-04-08 hr 12 diff --git ql/src/test/results/clientpositive/show_partitions2.q.out ql/src/test/results/clientpositive/show_partitions2.q.out new file mode 100644 index 0000000000..bf8da40f8a --- /dev/null +++ ql/src/test/results/clientpositive/show_partitions2.q.out @@ -0,0 +1,480 @@ +PREHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart1 +POSTHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart1 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-09') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = 
'1980-11-10') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-10') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-11') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-11 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds = '1980-11-19') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-10' AND ds < '1980-11-20' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-11 +ds=1980-11-19 +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds > '1980-11-09' ORDER BY ds DESC LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-19 +ds=1980-11-11 +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10 +PREHOOK: query: SHOW PARTITIONS mpart1 LIMIT 2 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 LIMIT 2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09 +ds=1980-11-10 +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-19 +ds=1980-11-11 +ds=1980-11-10 +ds=1980-11-09 +PREHOOK: query: CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs INT, rs STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart2 +POSTHOOK: query: CREATE TABLE mpart2 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hs INT, rs STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart2 +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=17, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-09/hs=17/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=19, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: 
Output: default@mpart2@ds=1980-11-09/hs=19/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-09', hs=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-09/hs=22/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=12/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=10, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=16/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=20, rs='AF') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=20/rs=AF +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=16/rs=AS +PREHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart2 +POSTHOOK: query: ALTER TABLE mpart2 ADD PARTITION (ds='1980-11-11', hs=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart2 +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=22/rs=AS +PREHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: 
default@srcpart1 +POSTHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hs INT, rs STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart1 +PREHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@srcpart1 +POSTHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@srcpart1 +POSTHOOK: Lineage: srcpart1.ds SCRIPT [] +POSTHOOK: Lineage: srcpart1.hs SCRIPT [] +POSTHOOK: Lineage: srcpart1.key1 SCRIPT [] +POSTHOOK: Lineage: srcpart1.rs SCRIPT [] +POSTHOOK: Lineage: srcpart1.value1 SCRIPT [] +PREHOOK: query: INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart1 +PREHOOK: Output: default@mpart2 +POSTHOOK: query: INSERT INTO TABLE mpart2 PARTITION(ds, hs, rs) SELECT * FROM srcpart1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart1 +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=21/rs=NA +POSTHOOK: Output: default@mpart2@ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=12/rs=AS +POSTHOOK: Output: default@mpart2@ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart2@ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=21,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=21,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-10,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=12,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=12,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=1980-11-11,hs=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hs=__HIVE_DEFAULT_PARTITION__,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart2 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hs=__HIVE_DEFAULT_PARTITION__,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS mpart2 
WHERE ds = '1980-11-10' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=21/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY hs DESC LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 WHERE hs > 9 AND hs < 19 ORDER BY hs DESC, ds +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 WHERE hs > 9 AND hs < 19 ORDER BY hs DESC, ds +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-11/hs=22/rs=AS 
+ds=1980-11-09/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 ORDER BY hs ASC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-10/hs=20/rs=AF +ds=1980-11-10/hs=21/rs=NA +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-11/hs=22/rs=AS +ds=1980-11-09/hs=22/rs=AS +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hs=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION (rs='AS') ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=__HIVE_DEFAULT_PARTITION__/hs=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hs=12/rs=AS +ds=1980-11-11/hs=22/rs=AS +ds=1980-11-11/hs=16/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-09/hs=22/rs=AS +ds=1980-11-09/hs=19/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-09/hs=17/rs=EU +ds=1980-11-09/hs=19/rs=AS +ds=1980-11-09/hs=22/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=10/rs=AS +ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' AND hs >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds = '1980-11-10') and (hs >= 20)) + limit: -1 + table: mpart2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 WHERE ds = '1980-11-10' ORDER BY rs DESC, hs LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: (ds = '1980-11-10') + limit: 4 + order: 2,1:-+ + table: mpart2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + 
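The order: 2,1:-+ and limit: 4 values in the plan above travel to the metastore through the Thrift changes below: PartitionsByExprRequest gains an optional order field (id 7) and the new get_partition_names_req call returns the filtered, sorted, limited names. A client-side sketch (the connected client and the Kryo-serialized filter bytes are assumed, as are the catalog and default-partition values):

import java.nio.ByteBuffer;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class PartitionNamesReqSketch {
  static List<String> sortedNames(ThriftHiveMetastore.Iface client, byte[] exprBytes)
      throws Exception {
    // Required fields: dbName, tblName, expr; the rest are optional.
    PartitionsByExprRequest req = new PartitionsByExprRequest(
        "default", "mpart2", ByteBuffer.wrap(exprBytes));
    req.setCatName("hive");                                    // assumed catalog
    req.setDefaultPartitionName("__HIVE_DEFAULT_PARTITION__");
    req.setOrder("2,1:-+");        // ORDER BY rs DESC, hs over keys (ds, hs, rs)
    req.setMaxParts((short) 4);    // LIMIT 4; -1 means no limit
    return client.get_partition_names_req(req);
  }
}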
+PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hs >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds = '1980-11-10') and (hs >= 20)) + limit: -1 + partSpec: + rs AS + table: mpart2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 ORDER BY hs DESC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + limit: -1 + order: 1,0:-- + table: mpart2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart2 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart2 PARTITION(ds = '1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + limit: 3 + partSpec: + ds 1980-11-10 + table: mpart2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git ql/src/test/results/clientpositive/showparts.q.out ql/src/test/results/clientpositive/showparts.q.out index 662b7a2fab..febd590e0c 100644 --- ql/src/test/results/clientpositive/showparts.q.out +++ ql/src/test/results/clientpositive/showparts.q.out @@ -13,6 +13,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: srcpart Stage: Stage-1 diff --git ql/src/test/results/clientpositive/temp_table_drop_partitions_filter.q.out ql/src/test/results/clientpositive/temp_table_drop_partitions_filter.q.out index ec4d537ced..c6bcf5f1d5 100644 --- ql/src/test/results/clientpositive/temp_table_drop_partitions_filter.q.out +++ ql/src/test/results/clientpositive/temp_table_drop_partitions_filter.q.out @@ -155,6 +155,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: ptestfilter_n1_temp Stage: Stage-1 diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 0e72625e01..c55da7377b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String defaultPartitionName; // optional private short maxParts; // optional private String catName; // optional + private String order; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), MAX_PARTS((short)5, "maxParts"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + ORDER((short)7, "order"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 6: // CAT_NAME return CAT_NAME; + case 7: // ORDER + return ORDER; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.ORDER}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -150,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetOrder()) { + this.order = other.order; + } } public PartitionsByExprRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.maxParts = (short)-1; this.catName = null; + this.order = null; } public String getDbName() { @@ -354,6 +365,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getOrder() { + return this.order; + } + + public void setOrder(String order) { + this.order = order; + } + + public void unsetOrder() { + this.order = null; + } + + /** Returns true if field order is set (has been assigned a value) and false otherwise */ + public boolean isSetOrder() { + return this.order != null; 
+ } + + public void setOrderIsSet(boolean value) { + if (!value) { + this.order = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -404,6 +438,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ORDER: + if (value == null) { + unsetOrder(); + } else { + setOrder((String)value); + } + break; + } } @@ -427,6 +469,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case ORDER: + return getOrder(); + } throw new IllegalStateException(); } @@ -450,6 +495,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case ORDER: + return isSetOrder(); } throw new IllegalStateException(); } @@ -521,6 +568,15 @@ public boolean equals(PartitionsByExprRequest that) { return false; } + boolean this_present_order = true && this.isSetOrder(); + boolean that_present_order = true && that.isSetOrder(); + if (this_present_order || that_present_order) { + if (!(this_present_order && that_present_order)) + return false; + if (!this.order.equals(that.order)) + return false; + } + return true; } @@ -558,6 +614,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_order = true && (isSetOrder()); + list.add(present_order); + if (present_order) + list.add(order); + return list.hashCode(); } @@ -629,6 +690,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetOrder()).compareTo(other.isSetOrder()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOrder()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.order, other.order); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -698,6 +769,16 @@ public String toString() { } first = false; } + if (isSetOrder()) { + if (!first) sb.append(", "); + sb.append("order:"); + if (this.order == null) { + sb.append("null"); + } else { + sb.append(this.order); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -803,6 +884,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // ORDER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.order = iprot.readString(); + struct.setOrderIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -850,6 +939,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldEnd(); } } + if (struct.order != null) { + if (struct.isSetOrder()) { + oprot.writeFieldBegin(ORDER_FIELD_DESC); + oprot.writeString(struct.order); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -880,7 +976,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetOrder()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } @@ -890,6 +989,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetOrder()) { + 
oprot.writeString(struct.order); + } } @Override @@ -901,7 +1003,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -914,6 +1016,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(3)) { + struct.order = iprot.readString(); + struct.setOrderIsSet(true); + } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 26cc9dd137..830dd78c3c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -206,6 +206,8 @@ public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partition_names_req(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public List get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -672,6 +674,8 @@ public void get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_names_req(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -3453,6 +3457,35 @@ public void send_get_partition_names_ps(String db_name, String tbl_name, List get_partition_names_req(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_partition_names_req(req); + return recv_get_partition_names_req(); + } + + public void send_get_partition_names_req(PartitionsByExprRequest req) throws org.apache.thrift.TException + { + get_partition_names_req_args args = new get_partition_names_req_args(); + args.setReq(req); + sendBase("get_partition_names_req", args); + } + + public List recv_get_partition_names_req() throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + get_partition_names_req_result result = new get_partition_names_req_result(); + receiveBase(result, 
"get_partition_names_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition_names_req failed: unknown result"); + } + public List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); @@ -10536,6 +10569,38 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } + public void get_partition_names_req(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_partition_names_req_call method_call = new get_partition_names_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_call extends org.apache.thrift.async.TAsyncMethodCall { + private PartitionsByExprRequest req; + public get_partition_names_req_call(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_partition_names_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_partition_names_req_args args = new get_partition_names_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_partition_names_req(); + } + } + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); @@ -15588,6 +15653,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_partition_names_req() { + super("get_partition_names_req"); + } + + public get_partition_names_req_args getEmptyArgsInstance() { + return new get_partition_names_req_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_partition_names_req_result getResult(I iface, 
get_partition_names_req_args args) throws org.apache.thrift.TException { + get_partition_names_req_result result = new get_partition_names_req_result(); + try { + result.success = iface.get_partition_names_req(args.req); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter extends org.apache.thrift.ProcessFunction { public get_partitions_by_filter() { super("get_partitions_by_filter"); @@ -21743,6 +21835,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_filter() { - super("get_partitions_by_filter"); - } - - public get_partitions_by_filter_args getEmptyArgsInstance() { - return new get_partitions_by_filter_args(); - } - - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { - public get_part_specs_by_filter() { - super("get_part_specs_by_filter"); - } - - public get_part_specs_by_filter_args getEmptyArgsInstance() { - return new get_part_specs_by_filter_args(); - } - - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch 
(Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { - public get_partitions_by_expr() { - super("get_partitions_by_expr"); - } - - public get_partitions_by_expr_args getEmptyArgsInstance() { - return new get_partitions_by_expr_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(PartitionsByExprResult o) { - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partitions_by_expr(args.req,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
get_num_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction { - public get_num_partitions_by_filter() { - super("get_num_partitions_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req extends org.apache.thrift.AsyncProcessFunction> { + public get_partition_names_req() { + super("get_partition_names_req"); } - public get_num_partitions_by_filter_args getEmptyArgsInstance() { - return new get_num_partitions_by_filter_args(); + public get_partition_names_req_args getEmptyArgsInstance() { + return new get_partition_names_req_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Integer o) { - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partition_names_req_result result = new get_partition_names_req_result(); result.success = o; - result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -27264,7 +27170,7 @@ public void onComplete(Integer o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + get_partition_names_req_result result = new get_partition_names_req_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -27295,25 +27201,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + public void start(I iface, get_partition_names_req_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partition_names_req(args.req,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_names() { - super("get_partitions_by_names"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_by_filter() { + super("get_partitions_by_filter"); } - public get_partitions_by_names_args getEmptyArgsInstance() { - return new get_partitions_by_names_args(); + public get_partitions_by_filter_args getEmptyArgsInstance() { + return new get_partitions_by_filter_args(); } public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback>() { public void onComplete(List o) { - get_partitions_by_names_result result = new get_partitions_by_names_result(); + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); result.success 
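On the client side, each async invocation is paired with a *_call object whose getResult() replays the framed response through recv_get_partition_names_req (see get_partition_names_req_call above). A hedged sketch of driving it end to end; the hostname, the 9083 metastore port, and the callback's generic parameter (which varies across libthrift versions) are assumptions:

    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    static void callAsync(PartitionsByExprRequest req) throws Exception {
      ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
          new TBinaryProtocol.Factory(),
          new TAsyncClientManager(),
          new TNonblockingSocket("metastore-host", 9083));          // assumed endpoint

      client.get_partition_names_req(req,
          new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.get_partition_names_req_call>() {
            public void onComplete(ThriftHiveMetastore.AsyncClient.get_partition_names_req_call call) {
              try {
                call.getResult().forEach(System.out::println);      // ordered, limited partition names
              } catch (Exception e) {
                e.printStackTrace();
              }
            }
            public void onError(Exception e) {
              e.printStackTrace();
            }
          });
    }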
= o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -27326,7 +27232,256 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_names_result result = new get_partitions_by_names_result(); + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_part_specs_by_filter() { + super("get_part_specs_by_filter"); + } + + public get_part_specs_by_filter_args getEmptyArgsInstance() { + return new get_part_specs_by_filter_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, 
args.max_parts,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { + public get_partitions_by_expr() { + super("get_partitions_by_expr"); + } + + public get_partitions_by_expr_args getEmptyArgsInstance() { + return new get_partitions_by_expr_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(PartitionsByExprResult o) { + get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partitions_by_expr(args.req,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_num_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction { + public get_num_partitions_by_filter() { + super("get_num_partitions_by_filter"); + } + + public get_num_partitions_by_filter_args getEmptyArgsInstance() { + return new get_num_partitions_by_filter_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Integer o) { + get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + 
result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_by_names() { + super("get_partitions_by_names"); + } + + public get_partitions_by_names_args getEmptyArgsInstance() { + return new get_partitions_by_names_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_by_names_result result = new get_partitions_by_names_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partitions_by_names_result result = new get_partitions_by_names_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -125046,31 +125201,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_names_req_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final 
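Every relocated AsyncProcessFunction above repeats the same onError contract: exceptions declared in the Thrift IDL (MetaException, NoSuchObjectException) ride back as REPLY payloads inside the result struct, while anything unexpected is downgraded to a TApplicationException EXCEPTION frame. Distilled into a sketch; the helper method is mine, not part of the generated file:

    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    import org.apache.thrift.protocol.TMessageType;

    // Returns the message type the handler will send; fills the result as a side effect.
    static byte mapError(Exception e, ThriftHiveMetastore.get_partition_names_req_result result) {
      if (e instanceof MetaException) {
        result.setO1((MetaException) e);          // declared IDL exception, normal REPLY
        return TMessageType.REPLY;
      }
      if (e instanceof NoSuchObjectException) {
        result.setO2((NoSuchObjectException) e);  // declared IDL exception, normal REPLY
        return TMessageType.REPLY;
      }
      return TMessageType.EXCEPTION;              // wrapped as TApplicationException(INTERNAL_ERROR)
    }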
org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_filter_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_filter_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partition_names_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partition_names_req_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private String filter; // required - private short max_parts; // required + private PartitionsByExprRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -125085,14 +125231,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // FILTER - return FILTER; - case 4: // MAX_PARTS - return MAX_PARTS; + case 1: // REQ + return REQ; default: return null; } @@ -125133,194 +125273,73 @@ public String getFieldName() { } // isset id assignments - private static final int __MAX_PARTS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_req_args.class, metaDataMap); } - public get_partitions_by_filter_args() { - this.max_parts = (short)-1; - + public get_partition_names_req_args() { } - public get_partitions_by_filter_args( - String db_name, - String tbl_name, - String filter, - short max_parts) + public get_partition_names_req_args( + PartitionsByExprRequest req) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.filter = filter; - this.max_parts = max_parts; - setMax_partsIsSet(true); + this.req = req; } /** * Performs a deep copy on other. */ - public get_partitions_by_filter_args(get_partitions_by_filter_args other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; - } - if (other.isSetFilter()) { - this.filter = other.filter; + public get_partition_names_req_args(get_partition_names_req_args other) { + if (other.isSetReq()) { + this.req = new PartitionsByExprRequest(other.req); } - this.max_parts = other.max_parts; } - public get_partitions_by_filter_args deepCopy() { - return new get_partitions_by_filter_args(this); + public get_partition_names_req_args deepCopy() { + return new get_partition_names_req_args(this); } @Override public void clear() { - this.db_name = null; - this.tbl_name = null; - this.filter = null; - this.max_parts = (short)-1; - - } - - public String getDb_name() { - return this.db_name; - } - - public void setDb_name(String db_name) { - this.db_name = db_name; - } - - public void unsetDb_name() { - this.db_name = null; - } - - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; - } - - public void setDb_nameIsSet(boolean value) { - if (!value) { - this.db_name = null; - } - } - - public String getTbl_name() { - return this.tbl_name; - } - - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; - } - - public void unsetTbl_name() { - this.tbl_name = null; - } - - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; - } - - public void setTbl_nameIsSet(boolean value) { - if (!value) { - this.tbl_name = null; - } + this.req = null; } - public String getFilter() { - return this.filter; + public PartitionsByExprRequest getReq() { + return this.req; } - public void setFilter(String filter) { - this.filter = filter; + public void setReq(PartitionsByExprRequest req) { + this.req = req; } - public void unsetFilter() { - this.filter = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field filter is set (has been assigned a value) and false otherwise */ - public boolean isSetFilter() { - return this.filter != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setFilterIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.filter = null; + this.req = null; } } - public short getMax_parts() { - return this.max_parts; - } - - public void setMax_parts(short max_parts) { - this.max_parts = max_parts; - setMax_partsIsSet(true); - } - - public void unsetMax_parts() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); - } - - /** Returns true if field 
max_parts is set (has been assigned a value) and false otherwise */ - public boolean isSetMax_parts() { - return EncodingUtils.testBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); - } - - public void setMax_partsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: - if (value == null) { - unsetDb_name(); - } else { - setDb_name((String)value); - } - break; - - case TBL_NAME: - if (value == null) { - unsetTbl_name(); - } else { - setTbl_name((String)value); - } - break; - - case FILTER: - if (value == null) { - unsetFilter(); - } else { - setFilter((String)value); - } - break; - - case MAX_PARTS: + case REQ: if (value == null) { - unsetMax_parts(); + unsetReq(); } else { - setMax_parts((Short)value); + setReq((PartitionsByExprRequest)value); } break; @@ -125329,17 +125348,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); - - case TBL_NAME: - return getTbl_name(); - - case FILTER: - return getFilter(); - - case MAX_PARTS: - return getMax_parts(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -125352,14 +125362,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case FILTER: - return isSetFilter(); - case MAX_PARTS: - return isSetMax_parts(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -125368,48 +125372,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_filter_args) - return this.equals((get_partitions_by_filter_args)that); + if (that instanceof get_partition_names_req_args) + return this.equals((get_partition_names_req_args)that); return false; } - public boolean equals(get_partitions_by_filter_args that) { + public boolean equals(get_partition_names_req_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) - return false; - if (!this.db_name.equals(that.db_name)) - return false; - } - - boolean this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) - return false; - if (!this.tbl_name.equals(that.tbl_name)) - return false; - } - - boolean this_present_filter = true && this.isSetFilter(); - boolean that_present_filter = true && that.isSetFilter(); - if (this_present_filter || that_present_filter) { - if (!(this_present_filter && that_present_filter)) - return false; - if (!this.filter.equals(that.filter)) - return false; - } - - boolean this_present_max_parts = true; - boolean that_present_max_parts = true; - if (this_present_max_parts || that_present_max_parts) { - if (!(this_present_max_parts && that_present_max_parts)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (this.max_parts != that.max_parts) + if 
(!this.req.equals(that.req)) return false; } @@ -125420,73 +125397,28 @@ public boolean equals(get_partitions_by_filter_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); - - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); - - boolean present_filter = true && (isSetFilter()); - list.add(present_filter); - if (present_filter) - list.add(filter); - - boolean present_max_parts = true; - list.add(present_max_parts); - if (present_max_parts) - list.add(max_parts); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(get_partitions_by_filter_args other) { + public int compareTo(get_partition_names_req_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFilter()).compareTo(other.isSetFilter()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFilter()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filter, other.filter); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetMax_parts()).compareTo(other.isSetMax_parts()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetMax_parts()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_parts, other.max_parts); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -125508,36 +125440,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
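The rewritten args struct above keeps the full generated reflection surface (_Fields, setFieldValue, getFieldValue, isSet), so generic Thrift tooling keeps working against the single req field. A small sketch grounded in the accessors shown in these hunks:

    ThriftHiveMetastore.get_partition_names_req_args a = new ThriftHiveMetastore.get_partition_names_req_args();
    a.setFieldValue(ThriftHiveMetastore.get_partition_names_req_args._Fields.REQ, new PartitionsByExprRequest());
    PartitionsByExprRequest back = (PartitionsByExprRequest)
        a.getFieldValue(ThriftHiveMetastore.get_partition_names_req_args._Fields.REQ);
    assert a.isSet(ThriftHiveMetastore.get_partition_names_req_args._Fields.REQ);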
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_filter_args("); + StringBuilder sb = new StringBuilder("get_partition_names_req_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { - sb.append("null"); - } else { - sb.append(this.tbl_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("filter:"); - if (this.filter == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.filter); + sb.append(this.req); } first = false; - if (!first) sb.append(", "); - sb.append("max_parts:"); - sb.append(this.max_parts); - first = false; sb.append(")"); return sb.toString(); } @@ -125545,6 +125457,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -125557,23 +125472,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partitions_by_filter_argsStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_argsStandardScheme getScheme() { - return new get_partitions_by_filter_argsStandardScheme(); + private static class get_partition_names_req_argsStandardSchemeFactory implements SchemeFactory { + public get_partition_names_req_argsStandardScheme getScheme() { + return new get_partition_names_req_argsStandardScheme(); } } - private static class get_partitions_by_filter_argsStandardScheme extends StandardScheme { + private static class get_partition_names_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -125583,34 +125496,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f break; } switch (schemeField.id) { - case 1: // DB_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // FILTER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.filter = iprot.readString(); - 
struct.setFilterIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // MAX_PARTS - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.max_parts = iprot.readI16(); - struct.setMax_partsIsSet(true); + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -125624,100 +125514,58 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_names_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); - oprot.writeFieldEnd(); - } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - oprot.writeString(struct.tbl_name); - oprot.writeFieldEnd(); - } - if (struct.filter != null) { - oprot.writeFieldBegin(FILTER_FIELD_DESC); - oprot.writeString(struct.filter); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); - oprot.writeI16(struct.max_parts); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partitions_by_filter_argsTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_argsTupleScheme getScheme() { - return new get_partitions_by_filter_argsTupleScheme(); + private static class get_partition_names_req_argsTupleSchemeFactory implements SchemeFactory { + public get_partition_names_req_argsTupleScheme getScheme() { + return new get_partition_names_req_argsTupleScheme(); } } - private static class get_partitions_by_filter_argsTupleScheme extends TupleScheme { + private static class get_partition_names_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetReq()) { optionals.set(0); } - if (struct.isSetTbl_name()) { - optionals.set(1); - } - if (struct.isSetFilter()) { - optionals.set(2); - } - if (struct.isSetMax_parts()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); - } - if (struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); - } - if (struct.isSetFilter()) { - oprot.writeString(struct.filter); - } - if (struct.isSetMax_parts()) { - oprot.writeI16(struct.max_parts); + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partition_names_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } - if (incoming.get(2)) { - struct.filter = iprot.readString(); - struct.setFilterIsSet(true); - } - if (incoming.get(3)) { - struct.max_parts = iprot.readI16(); - struct.setMax_partsIsSet(true); + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_names_req_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -125725,11 +125573,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_filter_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_filter_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partition_names_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partition_names_req_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -125803,20 +125651,20 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", 
org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_req_result.class, metaDataMap); } - public get_partitions_by_filter_result() { + public get_partition_names_req_result() { } - public get_partitions_by_filter_result( - List success, + public get_partition_names_req_result( + List success, MetaException o1, NoSuchObjectException o2) { @@ -125829,12 +125677,9 @@ public get_partitions_by_filter_result( /** * Performs a deep copy on other. */ - public get_partitions_by_filter_result(get_partitions_by_filter_result other) { + public get_partition_names_req_result(get_partition_names_req_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (Partition other_element : other.success) { - __this__success.add(new Partition(other_element)); - } + List __this__success = new ArrayList(other.success); this.success = __this__success; } if (other.isSetO1()) { @@ -125845,8 +125690,8 @@ public get_partitions_by_filter_result(get_partitions_by_filter_result other) { } } - public get_partitions_by_filter_result deepCopy() { - return new get_partitions_by_filter_result(this); + public get_partition_names_req_result deepCopy() { + return new get_partition_names_req_result(this); } @Override @@ -125860,22 +125705,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? null : this.success.iterator(); } - public void addToSuccess(Partition elem) { + public void addToSuccess(String elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -125946,7 +125791,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -126005,12 +125850,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_filter_result) - return this.equals((get_partitions_by_filter_result)that); + if (that instanceof get_partition_names_req_result) + return this.equals((get_partition_names_req_result)that); return false; } - public boolean equals(get_partitions_by_filter_result that) { + public boolean equals(get_partition_names_req_result that) { if (that == null) return false; @@ -126067,7 +125912,7 @@ public int hashCode() { } @Override - public int compareTo(get_partitions_by_filter_result other) { + public int compareTo(get_partition_names_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -126121,7 +125966,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
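A few hunks up, the TupleScheme for the args struct shrank from a four-bit to a one-bit optionals BitSet: req is the only field that needs a presence flag. A sketch of exercising that compact codec directly, assuming TTupleProtocol's public constructor (it routes struct.write/read to the TupleScheme via getScheme()):

    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    static void tupleRoundTrip() throws Exception {
      TMemoryBuffer buf = new TMemoryBuffer(64);
      ThriftHiveMetastore.get_partition_names_req_args empty = new ThriftHiveMetastore.get_partition_names_req_args();
      empty.write(new TTupleProtocol(buf));       // one-bit BitSet, req bit left at 0
      ThriftHiveMetastore.get_partition_names_req_args decoded = new ThriftHiveMetastore.get_partition_names_req_args();
      decoded.read(new TTupleProtocol(buf));
      assert !decoded.isSetReq();                 // only the presence flag crossed the wire
    }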
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_filter_result("); + StringBuilder sb = new StringBuilder("get_partition_names_req_result("); boolean first = true; sb.append("success:"); @@ -126172,15 +126017,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_partitions_by_filter_resultStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_resultStandardScheme getScheme() { - return new get_partitions_by_filter_resultStandardScheme(); + private static class get_partition_names_req_resultStandardSchemeFactory implements SchemeFactory { + public get_partition_names_req_resultStandardScheme getScheme() { + return new get_partition_names_req_resultStandardScheme(); } } - private static class get_partitions_by_filter_resultStandardScheme extends StandardScheme { + private static class get_partition_names_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -126194,12 +126039,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); - struct.success = new ArrayList(_list1598.size); - Partition _elem1599; + struct.success = new ArrayList(_list1598.size); + String _elem1599; for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) { - _elem1599 = new Partition(); - _elem1599.read(iprot); + _elem1599 = iprot.readString(); struct.success.add(_elem1599); } iprot.readListEnd(); @@ -126236,17 +126080,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_names_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1601 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1601 : struct.success) { - _iter1601.write(oprot); + oprot.writeString(_iter1601); } oprot.writeListEnd(); } @@ -126268,16 +126112,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ } - private static class get_partitions_by_filter_resultTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_resultTupleScheme getScheme() { - return new get_partitions_by_filter_resultTupleScheme(); + private static class get_partition_names_req_resultTupleSchemeFactory implements SchemeFactory { + public get_partition_names_req_resultTupleScheme getScheme() { + return new get_partition_names_req_resultTupleScheme(); } } - private static class get_partitions_by_filter_resultTupleScheme 
extends TupleScheme { + private static class get_partition_names_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -126293,9 +126137,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1602 : struct.success) + for (String _iter1602 : struct.success) { - _iter1602.write(oprot); + oprot.writeString(_iter1602); } } } @@ -126308,18 +126152,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1603.size); - Partition _elem1604; + org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1603.size); + String _elem1604; for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) { - _elem1604 = new Partition(); - _elem1604.read(iprot); + _elem1604 = iprot.readString(); struct.success.add(_elem1604); } } @@ -126340,24 +126183,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", 
org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_part_specs_by_filter_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_part_specs_by_filter_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_by_filter_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_filter_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required private String filter; // required - private int max_parts; // required + private short max_parts; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -126439,21 +126282,21 @@ public String getFieldName() { tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); } - public get_part_specs_by_filter_args() { - this.max_parts = -1; + public get_partitions_by_filter_args() { + this.max_parts = (short)-1; } - public get_part_specs_by_filter_args( + public get_partitions_by_filter_args( String db_name, String tbl_name, String filter, - int max_parts) + short max_parts) { this(); this.db_name = db_name; @@ -126466,7 +126309,7 @@ public get_part_specs_by_filter_args( /** * Performs a deep copy on other. 
*/ - public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { + public get_partitions_by_filter_args(get_partitions_by_filter_args other) { __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; @@ -126480,8 +126323,8 @@ public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { this.max_parts = other.max_parts; } - public get_part_specs_by_filter_args deepCopy() { - return new get_part_specs_by_filter_args(this); + public get_partitions_by_filter_args deepCopy() { + return new get_partitions_by_filter_args(this); } @Override @@ -126489,7 +126332,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.filter = null; - this.max_parts = -1; + this.max_parts = (short)-1; } @@ -126562,11 +126405,11 @@ public void setFilterIsSet(boolean value) { } } - public int getMax_parts() { + public short getMax_parts() { return this.max_parts; } - public void setMax_parts(int max_parts) { + public void setMax_parts(short max_parts) { this.max_parts = max_parts; setMax_partsIsSet(true); } @@ -126614,7 +126457,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetMax_parts(); } else { - setMax_parts((Integer)value); + setMax_parts((Short)value); } break; @@ -126662,12 +126505,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_part_specs_by_filter_args) - return this.equals((get_part_specs_by_filter_args)that); + if (that instanceof get_partitions_by_filter_args) + return this.equals((get_partitions_by_filter_args)that); return false; } - public boolean equals(get_part_specs_by_filter_args that) { + public boolean equals(get_partitions_by_filter_args that) { if (that == null) return false; @@ -126738,7 +126581,7 @@ public int hashCode() { } @Override - public int compareTo(get_part_specs_by_filter_args other) { + public int compareTo(get_partitions_by_filter_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -126802,7 +126645,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
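From here on the hunks are mostly the code generator reordering classes after the new method was inserted: get_part_specs_by_filter_args (max_parts as i32) and get_partitions_by_filter_args (max_parts as i16) trade places in the file without changing shape. The i16 wire type matters to callers pushing a limit down this path; a trivial sketch of the clamp involved:

    // max_parts travels as Thrift i16 (a Java short), so larger limits need clamping.
    int requestedLimit = 100_000;                                   // hypothetical user-supplied LIMIT
    short maxParts = (short) Math.min(requestedLimit, Short.MAX_VALUE);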
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_part_specs_by_filter_args("); + StringBuilder sb = new StringBuilder("get_partitions_by_filter_args("); boolean first = true; sb.append("db_name:"); @@ -126859,15 +126702,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_part_specs_by_filter_argsStandardSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_argsStandardScheme getScheme() { - return new get_part_specs_by_filter_argsStandardScheme(); + private static class get_partitions_by_filter_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_argsStandardScheme getScheme() { + return new get_partitions_by_filter_argsStandardScheme(); } } - private static class get_part_specs_by_filter_argsStandardScheme extends StandardScheme { + private static class get_partitions_by_filter_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -126902,8 +126745,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f } break; case 4: // MAX_PARTS - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.max_parts = iprot.readI32(); + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -126918,7 +126761,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -126938,7 +126781,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldEnd(); } oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); - oprot.writeI32(struct.max_parts); + oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -126946,16 +126789,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ } - private static class get_part_specs_by_filter_argsTupleSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_argsTupleScheme getScheme() { - return new get_part_specs_by_filter_argsTupleScheme(); + private static class get_partitions_by_filter_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_argsTupleScheme getScheme() { + return new get_partitions_by_filter_argsTupleScheme(); } } - private static class get_part_specs_by_filter_argsTupleScheme extends TupleScheme { + private static class get_partitions_by_filter_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -126981,12 +126824,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f oprot.writeString(struct.filter); } if (struct.isSetMax_parts()) { - oprot.writeI32(struct.max_parts); + oprot.writeI16(struct.max_parts); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { @@ -127002,7 +126845,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi struct.setFilterIsSet(true); } if (incoming.get(3)) { - struct.max_parts = iprot.readI32(); + struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } } @@ -127010,8 +126853,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -127019,11 +126862,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_part_specs_by_filter_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_part_specs_by_filter_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_by_filter_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_filter_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -127097,20 +126940,20 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpec.class)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap); } - public get_part_specs_by_filter_result() { + public get_partitions_by_filter_result() { } - public get_part_specs_by_filter_result( - List success, + public get_partitions_by_filter_result( + List success, MetaException o1, NoSuchObjectException o2) { @@ -127123,11 +126966,11 @@ public get_part_specs_by_filter_result( /** * Performs a deep copy on other. */ - public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { + public get_partitions_by_filter_result(get_partitions_by_filter_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (PartitionSpec other_element : other.success) { - __this__success.add(new PartitionSpec(other_element)); + List __this__success = new ArrayList(other.success.size()); + for (Partition other_element : other.success) { + __this__success.add(new Partition(other_element)); } this.success = __this__success; } @@ -127139,8 +126982,8 @@ public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { } } - public get_part_specs_by_filter_result deepCopy() { - return new get_part_specs_by_filter_result(this); + public get_partitions_by_filter_result deepCopy() { + return new get_partitions_by_filter_result(this); } @Override @@ -127154,22 +126997,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? 
null : this.success.iterator(); } - public void addToSuccess(PartitionSpec elem) { + public void addToSuccess(Partition elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -127240,7 +127083,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -127299,12 +127142,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_part_specs_by_filter_result) - return this.equals((get_part_specs_by_filter_result)that); + if (that instanceof get_partitions_by_filter_result) + return this.equals((get_partitions_by_filter_result)that); return false; } - public boolean equals(get_part_specs_by_filter_result that) { + public boolean equals(get_partitions_by_filter_result that) { if (that == null) return false; @@ -127361,7 +127204,7 @@ public int hashCode() { } @Override - public int compareTo(get_part_specs_by_filter_result other) { + public int compareTo(get_partitions_by_filter_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -127415,7 +127258,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_part_specs_by_filter_result("); + StringBuilder sb = new StringBuilder("get_partitions_by_filter_result("); boolean first = true; sb.append("success:"); @@ -127466,15 +127309,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_part_specs_by_filter_resultStandardSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_resultStandardScheme getScheme() { - return new get_part_specs_by_filter_resultStandardScheme(); + private static class get_partitions_by_filter_resultStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_resultStandardScheme getScheme() { + return new get_partitions_by_filter_resultStandardScheme(); } } - private static class get_part_specs_by_filter_resultStandardScheme extends StandardScheme { + private static class get_partitions_by_filter_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -127488,11 +127331,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); - struct.success = new ArrayList(_list1606.size); - PartitionSpec _elem1607; + struct.success = new ArrayList(_list1606.size); + Partition _elem1607; for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) { - _elem1607 = new PartitionSpec(); + _elem1607 = new Partition(); _elem1607.read(iprot); struct.success.add(_elem1607); } @@ -127530,7 
+127373,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -127538,7 +127381,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1609 : struct.success) + for (Partition _iter1609 : struct.success) { _iter1609.write(oprot); } @@ -127562,16 +127405,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ } - private static class get_part_specs_by_filter_resultTupleSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_resultTupleScheme getScheme() { - return new get_part_specs_by_filter_resultTupleScheme(); + private static class get_partitions_by_filter_resultTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_resultTupleScheme getScheme() { + return new get_partitions_by_filter_resultTupleScheme(); } } - private static class get_part_specs_by_filter_resultTupleScheme extends TupleScheme { + private static class get_partitions_by_filter_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -127587,7 +127430,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1610 : struct.success) + for (Partition _iter1610 : struct.success) { _iter1610.write(oprot); } @@ -127602,17 +127445,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1611.size); - PartitionSpec _elem1612; + struct.success = new ArrayList(_list1611.size); + Partition _elem1612; for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1612 = new PartitionSpec(); + _elem1612 = new Partition(); _elem1612.read(iprot); struct.success.add(_elem1612); } @@ -127634,22 +127477,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_args 
implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_args"); - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_expr_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_expr_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_part_specs_by_filter_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_part_specs_by_filter_argsTupleSchemeFactory()); } - private PartitionsByExprRequest req; // required + private String db_name; // required + private String tbl_name; // required + private String filter; // required + private int max_parts; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
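
Each *_FIELD_DESC declared above pins down a field's wire name, TType constant,
and field id, which is the header the standard scheme emits before each value.
A tiny illustrative sketch reusing the same triple as MAX_PARTS_FIELD_DESC:

    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TType;

    public class FieldDescSketch {
      public static void main(String[] args) {
        // Same values as MAX_PARTS_FIELD_DESC: name "max_parts", type I32, id 4.
        TField maxParts = new TField("max_parts", TType.I32, (short) 4);
        System.out.println(maxParts.name + " / " + maxParts.type + " / " + maxParts.id);
      }
    }
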
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + FILTER((short)3, "filter"), + MAX_PARTS((short)4, "max_parts"); private static final Map byName = new HashMap(); @@ -127664,8 +127516,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // REQ - return REQ; + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // FILTER + return FILTER; + case 4: // MAX_PARTS + return MAX_PARTS; default: return null; } @@ -127706,73 +127564,194 @@ public String getFieldName() { } // isset id assignments + private static final int __MAX_PARTS_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); } - public get_partitions_by_expr_args() { + public get_part_specs_by_filter_args() { + this.max_parts = -1; + } - public get_partitions_by_expr_args( - PartitionsByExprRequest req) + public get_part_specs_by_filter_args( + String db_name, + String tbl_name, + String filter, + int max_parts) { this(); - this.req = req; + this.db_name = db_name; + this.tbl_name = tbl_name; + this.filter = filter; + this.max_parts = max_parts; + setMax_partsIsSet(true); } /** * Performs a deep copy on other. 
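
The _Fields enum just declared is what the deserializers use to dispatch on
incoming field ids; ids that are not recognized map to null and the schemes
skip the field. A sketch of the lookup, illustrative only:

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_part_specs_by_filter_args;

    public class FieldsLookupSketch {
      public static void main(String[] args) {
        get_part_specs_by_filter_args._Fields f =
            get_part_specs_by_filter_args._Fields.findByThriftId(4);
        System.out.println(f.getFieldName());     // max_parts
        System.out.println(f.getThriftFieldId()); // 4
        // Unknown ids resolve to null, which readers treat as "skip this field".
        System.out.println(get_part_specs_by_filter_args._Fields.findByThriftId(99));
      }
    }
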
*/ - public get_partitions_by_expr_args(get_partitions_by_expr_args other) { - if (other.isSetReq()) { - this.req = new PartitionsByExprRequest(other.req); + public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetFilter()) { + this.filter = other.filter; } + this.max_parts = other.max_parts; } - public get_partitions_by_expr_args deepCopy() { - return new get_partitions_by_expr_args(this); + public get_part_specs_by_filter_args deepCopy() { + return new get_part_specs_by_filter_args(this); } @Override public void clear() { - this.req = null; + this.db_name = null; + this.tbl_name = null; + this.filter = null; + this.max_parts = -1; + } - public PartitionsByExprRequest getReq() { - return this.req; + public String getDb_name() { + return this.db_name; } - public void setReq(PartitionsByExprRequest req) { - this.req = req; + public void setDb_name(String db_name) { + this.db_name = db_name; } - public void unsetReq() { - this.req = null; + public void unsetDb_name() { + this.db_name = null; } - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; } - public void setReqIsSet(boolean value) { + public void setDb_nameIsSet(boolean value) { if (!value) { - this.req = null; + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public String getFilter() { + return this.filter; + } + + public void setFilter(String filter) { + this.filter = filter; + } + + public void unsetFilter() { + this.filter = null; + } + + /** Returns true if field filter is set (has been assigned a value) and false otherwise */ + public boolean isSetFilter() { + return this.filter != null; + } + + public void setFilterIsSet(boolean value) { + if (!value) { + this.filter = null; } } + public int getMax_parts() { + return this.max_parts; + } + + public void setMax_parts(int max_parts) { + this.max_parts = max_parts; + setMax_partsIsSet(true); + } + + public void unsetMax_parts() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); + } + + /** Returns true if field max_parts is set (has been assigned a value) and false otherwise */ + public boolean isSetMax_parts() { + return EncodingUtils.testBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); + } + + public void setMax_partsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { - case REQ: + case DB_NAME: if (value == null) { - unsetReq(); + unsetDb_name(); } else { - setReq((PartitionsByExprRequest)value); + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } 
else { + setTbl_name((String)value); + } + break; + + case FILTER: + if (value == null) { + unsetFilter(); + } else { + setFilter((String)value); + } + break; + + case MAX_PARTS: + if (value == null) { + unsetMax_parts(); + } else { + setMax_parts((Integer)value); } break; @@ -127781,8 +127760,17 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case REQ: - return getReq(); + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case FILTER: + return getFilter(); + + case MAX_PARTS: + return getMax_parts(); } throw new IllegalStateException(); @@ -127795,8 +127783,14 @@ public boolean isSet(_Fields field) { } switch (field) { - case REQ: - return isSetReq(); + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case FILTER: + return isSetFilter(); + case MAX_PARTS: + return isSetMax_parts(); } throw new IllegalStateException(); } @@ -127805,21 +127799,48 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_expr_args) - return this.equals((get_partitions_by_expr_args)that); + if (that instanceof get_part_specs_by_filter_args) + return this.equals((get_part_specs_by_filter_args)that); return false; } - public boolean equals(get_partitions_by_expr_args that) { + public boolean equals(get_part_specs_by_filter_args that) { if (that == null) return false; - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) return false; - if (!this.req.equals(that.req)) + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_filter = true && this.isSetFilter(); + boolean that_present_filter = true && that.isSetFilter(); + if (this_present_filter || that_present_filter) { + if (!(this_present_filter && that_present_filter)) + return false; + if (!this.filter.equals(that.filter)) + return false; + } + + boolean this_present_max_parts = true; + boolean that_present_max_parts = true; + if (this_present_max_parts || that_present_max_parts) { + if (!(this_present_max_parts && that_present_max_parts)) + return false; + if (this.max_parts != that.max_parts) return false; } @@ -127830,28 +127851,73 @@ public boolean equals(get_partitions_by_expr_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_filter = true && (isSetFilter()); + list.add(present_filter); + if 
(present_filter) + list.add(filter); + + boolean present_max_parts = true; + list.add(present_max_parts); + if (present_max_parts) + list.add(max_parts); return list.hashCode(); } @Override - public int compareTo(get_partitions_by_expr_args other) { + public int compareTo(get_part_specs_by_filter_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFilter()).compareTo(other.isSetFilter()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFilter()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filter, other.filter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMax_parts()).compareTo(other.isSetMax_parts()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMax_parts()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_parts, other.max_parts); if (lastComparison != 0) { return lastComparison; } @@ -127873,16 +127939,36 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_expr_args("); + StringBuilder sb = new StringBuilder("get_part_specs_by_filter_args("); boolean first = true; - sb.append("req:"); - if (this.req == null) { + sb.append("db_name:"); + if (this.db_name == null) { sb.append("null"); } else { - sb.append(this.req); + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("filter:"); + if (this.filter == null) { + sb.append("null"); + } else { + sb.append(this.filter); } first = false; + if (!first) sb.append(", "); + sb.append("max_parts:"); + sb.append(this.max_parts); + first = false; sb.append(")"); return sb.toString(); } @@ -127890,9 +127976,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (req != null) { - req.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -127905,21 +127988,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
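
The unsetMax_parts/isSetMax_parts/setMax_partsIsSet trio above, and the
__isset_bitfield reset that follows, track presence of the primitive max_parts
field with a single bit via org.apache.thrift.EncodingUtils. A minimal
standalone sketch of the same pattern:

    import org.apache.thrift.EncodingUtils;

    public class IssetSketch {
      public static void main(String[] args) {
        final int MAX_PARTS_ISSET_ID = 0; // bit position, as assigned above
        byte bits = 0;                    // stands in for __isset_bitfield
        bits = EncodingUtils.setBit(bits, MAX_PARTS_ISSET_ID, true);         // setMax_partsIsSet(true)
        System.out.println(EncodingUtils.testBit(bits, MAX_PARTS_ISSET_ID)); // true
        bits = EncodingUtils.clearBit(bits, MAX_PARTS_ISSET_ID);             // unsetMax_parts()
        System.out.println(EncodingUtils.testBit(bits, MAX_PARTS_ISSET_ID)); // false
      }
    }
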
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partitions_by_expr_argsStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_expr_argsStandardScheme getScheme() { - return new get_partitions_by_expr_argsStandardScheme(); + private static class get_part_specs_by_filter_argsStandardSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_argsStandardScheme getScheme() { + return new get_part_specs_by_filter_argsStandardScheme(); } } - private static class get_partitions_by_expr_argsStandardScheme extends StandardScheme { + private static class get_part_specs_by_filter_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -127929,11 +128014,34 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_e break; } switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new PartitionsByExprRequest(); - struct.req.read(iprot); - struct.setReqIsSet(true); + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // FILTER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.filter = iprot.readString(); + struct.setFilterIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MAX_PARTS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.max_parts = iprot.readI32(); + struct.setMax_partsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -127947,70 +128055,1099 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_e struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); oprot.writeFieldEnd(); } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.filter != null) { + oprot.writeFieldBegin(FILTER_FIELD_DESC); + oprot.writeString(struct.filter); + oprot.writeFieldEnd(); + } + 
oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); + oprot.writeI32(struct.max_parts); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partitions_by_expr_argsTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_expr_argsTupleScheme getScheme() { - return new get_partitions_by_expr_argsTupleScheme(); + private static class get_part_specs_by_filter_argsTupleSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_argsTupleScheme getScheme() { + return new get_part_specs_by_filter_argsTupleScheme(); } } - private static class get_partitions_by_expr_argsTupleScheme extends TupleScheme { + private static class get_part_specs_by_filter_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetReq()) { + if (struct.isSetDb_name()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); + if (struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetFilter()) { + optionals.set(2); + } + if (struct.isSetMax_parts()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetFilter()) { + oprot.writeString(struct.filter); + } + if (struct.isSetMax_parts()) { + oprot.writeI32(struct.max_parts); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.req = new PartitionsByExprRequest(); - struct.req.read(iprot); - struct.setReqIsSet(true); + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.filter = iprot.readString(); + struct.setFilterIsSet(true); + } + if (incoming.get(3)) { + struct.max_parts = iprot.readI32(); + struct.setMax_partsIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_result"); - private static final 
org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_expr_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_expr_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_part_specs_by_filter_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_part_specs_by_filter_resultTupleSchemeFactory()); } - private PartitionsByExprResult success; // required + private List success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
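
Generated *_result structs follow a fixed convention: field id 0 carries the
return value (success) and each declared exception takes the next id, so
exactly one of success, o1, or o2 is normally set. A small sketch, with the
MetaException message purely illustrative:

    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_part_specs_by_filter_result;

    public class ResultConventionSketch {
      public static void main(String[] args) {
        get_part_specs_by_filter_result r = new get_part_specs_by_filter_result();
        r.setO1(new MetaException("simulated failure")); // o1 = MetaException, id 1
        System.out.println(r.isSetSuccess()); // false
        System.out.println(r.isSetO1());      // true
        System.out.println(r.isSetO2());      // false
      }
    }
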
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpec.class)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_result.class, metaDataMap); + } + + public get_part_specs_by_filter_result() { + } + + public get_part_specs_by_filter_result( + List success, + MetaException o1, + NoSuchObjectException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success.size()); + for (PartitionSpec other_element : other.success) { + __this__success.add(new PartitionSpec(other_element)); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchObjectException(other.o2); + } + } + + public get_part_specs_by_filter_result deepCopy() { + return new get_part_specs_by_filter_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(PartitionSpec elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { + return this.o2; + } + + public void setO2(NoSuchObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchObjectException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_part_specs_by_filter_result) + return this.equals((get_part_specs_by_filter_result)that); + return false; + } + + public boolean equals(get_part_specs_by_filter_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if 
(!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_part_specs_by_filter_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_part_specs_by_filter_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private 
static class get_part_specs_by_filter_resultStandardSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_resultStandardScheme getScheme() { + return new get_part_specs_by_filter_resultStandardScheme(); + } + } + + private static class get_part_specs_by_filter_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.success = new ArrayList(_list1614.size); + PartitionSpec _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + { + _elem1615 = new PartitionSpec(); + _elem1615.read(iprot); + struct.success.add(_elem1615); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (PartitionSpec _iter1617 : struct.success) + { + _iter1617.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_part_specs_by_filter_resultTupleSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_resultTupleScheme getScheme() { + return new get_part_specs_by_filter_resultTupleScheme(); + } + } + + private static class get_part_specs_by_filter_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + 
optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (PartitionSpec _iter1618 : struct.success) + { + _iter1618.write(oprot); + } + } + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1619.size); + PartitionSpec _elem1620; + for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + { + _elem1620 = new PartitionSpec(); + _elem1620.read(iprot); + struct.success.add(_elem1620); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partitions_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_expr_argsTupleSchemeFactory()); + } + + private PartitionsByExprRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_args.class, metaDataMap); + } + + public get_partitions_by_expr_args() { + } + + public get_partitions_by_expr_args( + PartitionsByExprRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. + */ + public get_partitions_by_expr_args(get_partitions_by_expr_args other) { + if (other.isSetReq()) { + this.req = new PartitionsByExprRequest(other.req); + } + } + + public get_partitions_by_expr_args deepCopy() { + return new get_partitions_by_expr_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public PartitionsByExprRequest getReq() { + return this.req; + } + + public void setReq(PartitionsByExprRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((PartitionsByExprRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partitions_by_expr_args) + return this.equals((get_partitions_by_expr_args)that); + return false; + } + + public boolean equals(get_partitions_by_expr_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override 
+ public int compareTo(get_partitions_by_expr_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partitions_by_expr_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_partitions_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_expr_argsStandardScheme getScheme() { + return new get_partitions_by_expr_argsStandardScheme(); + } + } + + private static class get_partitions_by_expr_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + 
} + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_partitions_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_expr_argsTupleScheme getScheme() { + return new get_partitions_by_expr_argsTupleScheme(); + } + } + + private static class get_partitions_by_expr_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partitions_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_expr_resultTupleSchemeFactory()); + } + + private PartitionsByExprResult success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -130198,13 +131335,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); - struct.names = new ArrayList(_list1614.size); - String _elem1615; - for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); + struct.names = new ArrayList(_list1622.size); + String _elem1623; + for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) { - _elem1615 = iprot.readString(); - struct.names.add(_elem1615); + _elem1623 = iprot.readString(); + struct.names.add(_elem1623); } iprot.readListEnd(); } @@ -130240,9 +131377,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1617 : struct.names) + for (String 
_iter1625 : struct.names) { - oprot.writeString(_iter1617); + oprot.writeString(_iter1625); } oprot.writeListEnd(); } @@ -130285,9 +131422,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1618 : struct.names) + for (String _iter1626 : struct.names) { - oprot.writeString(_iter1618); + oprot.writeString(_iter1626); } } } @@ -130307,13 +131444,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1619.size); - String _elem1620; - for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1627.size); + String _elem1628; + for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) { - _elem1620 = iprot.readString(); - struct.names.add(_elem1620); + _elem1628 = iprot.readString(); + struct.names.add(_elem1628); } } struct.setNamesIsSet(true); @@ -130800,14 +131937,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); - struct.success = new ArrayList(_list1622.size); - Partition _elem1623; - for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) + org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); + struct.success = new ArrayList(_list1630.size); + Partition _elem1631; + for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) { - _elem1623 = new Partition(); - _elem1623.read(iprot); - struct.success.add(_elem1623); + _elem1631 = new Partition(); + _elem1631.read(iprot); + struct.success.add(_elem1631); } iprot.readListEnd(); } @@ -130851,9 +131988,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1625 : struct.success) + for (Partition _iter1633 : struct.success) { - _iter1625.write(oprot); + _iter1633.write(oprot); } oprot.writeListEnd(); } @@ -130900,9 +132037,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1626 : struct.success) + for (Partition _iter1634 : struct.success) { - _iter1626.write(oprot); + _iter1634.write(oprot); } } } @@ -130920,14 +132057,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1627.size); - Partition _elem1628; - for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) + org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1635.size); + Partition _elem1636; + for (int _i1637 = 0; _i1637 < 
_list1635.size; ++_i1637) { - _elem1628 = new Partition(); - _elem1628.read(iprot); - struct.success.add(_elem1628); + _elem1636 = new Partition(); + _elem1636.read(iprot); + struct.success.add(_elem1636); } } struct.setSuccessIsSet(true); @@ -133415,14 +134552,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1630.size); - Partition _elem1631; - for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) + org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1638.size); + Partition _elem1639; + for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) { - _elem1631 = new Partition(); - _elem1631.read(iprot); - struct.new_parts.add(_elem1631); + _elem1639 = new Partition(); + _elem1639.read(iprot); + struct.new_parts.add(_elem1639); } iprot.readListEnd(); } @@ -133458,9 +134595,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1633 : struct.new_parts) + for (Partition _iter1641 : struct.new_parts) { - _iter1633.write(oprot); + _iter1641.write(oprot); } oprot.writeListEnd(); } @@ -133503,9 +134640,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1634 : struct.new_parts) + for (Partition _iter1642 : struct.new_parts) { - _iter1634.write(oprot); + _iter1642.write(oprot); } } } @@ -133525,14 +134662,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1635.size); - Partition _elem1636; - for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) + org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1643.size); + Partition _elem1644; + for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645) { - _elem1636 = new Partition(); - _elem1636.read(iprot); - struct.new_parts.add(_elem1636); + _elem1644 = new Partition(); + _elem1644.read(iprot); + struct.new_parts.add(_elem1644); } } struct.setNew_partsIsSet(true); @@ -134585,14 +135722,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1638.size); - Partition _elem1639; - for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) + org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1646.size); + Partition _elem1647; + for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648) { - _elem1639 = new Partition(); - _elem1639.read(iprot); - struct.new_parts.add(_elem1639); + _elem1647 = new Partition(); + _elem1647.read(iprot); + struct.new_parts.add(_elem1647); } 
iprot.readListEnd(); } @@ -134637,9 +135774,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1641 : struct.new_parts) + for (Partition _iter1649 : struct.new_parts) { - _iter1641.write(oprot); + _iter1649.write(oprot); } oprot.writeListEnd(); } @@ -134690,9 +135827,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1642 : struct.new_parts) + for (Partition _iter1650 : struct.new_parts) { - _iter1642.write(oprot); + _iter1650.write(oprot); } } } @@ -134715,14 +135852,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1643.size); - Partition _elem1644; - for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645) + org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1651.size); + Partition _elem1652; + for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653) { - _elem1644 = new Partition(); - _elem1644.read(iprot); - struct.new_parts.add(_elem1644); + _elem1652 = new Partition(); + _elem1652.read(iprot); + struct.new_parts.add(_elem1652); } } struct.setNew_partsIsSet(true); @@ -137861,13 +138998,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1646.size); - String _elem1647; - for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648) + org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1654.size); + String _elem1655; + for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656) { - _elem1647 = iprot.readString(); - struct.part_vals.add(_elem1647); + _elem1655 = iprot.readString(); + struct.part_vals.add(_elem1655); } iprot.readListEnd(); } @@ -137912,9 +139049,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1649 : struct.part_vals) + for (String _iter1657 : struct.part_vals) { - oprot.writeString(_iter1649); + oprot.writeString(_iter1657); } oprot.writeListEnd(); } @@ -137965,9 +139102,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1650 : struct.part_vals) + for (String _iter1658 : struct.part_vals) { - oprot.writeString(_iter1650); + oprot.writeString(_iter1658); } } } @@ -137990,13 +139127,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); - struct.part_vals = new ArrayList(_list1651.size); - String _elem1652; - for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653) + org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1659.size); + String _elem1660; + for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) { - _elem1652 = iprot.readString(); - struct.part_vals.add(_elem1652); + _elem1660 = iprot.readString(); + struct.part_vals.add(_elem1660); } } struct.setPart_valsIsSet(true); @@ -139808,13 +140945,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1654.size); - String _elem1655; - for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656) + org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1662.size); + String _elem1663; + for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) { - _elem1655 = iprot.readString(); - struct.part_vals.add(_elem1655); + _elem1663 = iprot.readString(); + struct.part_vals.add(_elem1663); } iprot.readListEnd(); } @@ -139848,9 +140985,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1657 : struct.part_vals) + for (String _iter1665 : struct.part_vals) { - oprot.writeString(_iter1657); + oprot.writeString(_iter1665); } oprot.writeListEnd(); } @@ -139887,9 +141024,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1658 : struct.part_vals) + for (String _iter1666 : struct.part_vals) { - oprot.writeString(_iter1658); + oprot.writeString(_iter1666); } } } @@ -139904,13 +141041,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1659.size); - String _elem1660; - for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) + org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1667.size); + String _elem1668; + for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) { - _elem1660 = iprot.readString(); - struct.part_vals.add(_elem1660); + _elem1668 = iprot.readString(); + struct.part_vals.add(_elem1668); } } struct.setPart_valsIsSet(true); @@ -142065,13 +143202,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); - struct.success = new ArrayList(_list1662.size); - String _elem1663; - for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) + org.apache.thrift.protocol.TList _list1670 = iprot.readListBegin(); + struct.success = new 
ArrayList(_list1670.size); + String _elem1671; + for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672) { - _elem1663 = iprot.readString(); - struct.success.add(_elem1663); + _elem1671 = iprot.readString(); + struct.success.add(_elem1671); } iprot.readListEnd(); } @@ -142106,9 +143243,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1665 : struct.success) + for (String _iter1673 : struct.success) { - oprot.writeString(_iter1665); + oprot.writeString(_iter1673); } oprot.writeListEnd(); } @@ -142147,9 +143284,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1666 : struct.success) + for (String _iter1674 : struct.success) { - oprot.writeString(_iter1666); + oprot.writeString(_iter1674); } } } @@ -142164,13 +143301,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1667.size); - String _elem1668; - for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) + org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1675.size); + String _elem1676; + for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677) { - _elem1668 = iprot.readString(); - struct.success.add(_elem1668); + _elem1676 = iprot.readString(); + struct.success.add(_elem1676); } } struct.setSuccessIsSet(true); @@ -142933,15 +144070,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1670 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1670.size); - String _key1671; - String _val1672; - for (int _i1673 = 0; _i1673 < _map1670.size; ++_i1673) + org.apache.thrift.protocol.TMap _map1678 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1678.size); + String _key1679; + String _val1680; + for (int _i1681 = 0; _i1681 < _map1678.size; ++_i1681) { - _key1671 = iprot.readString(); - _val1672 = iprot.readString(); - struct.success.put(_key1671, _val1672); + _key1679 = iprot.readString(); + _val1680 = iprot.readString(); + struct.success.put(_key1679, _val1680); } iprot.readMapEnd(); } @@ -142976,10 +144113,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1674 : struct.success.entrySet()) + for (Map.Entry _iter1682 : struct.success.entrySet()) { - oprot.writeString(_iter1674.getKey()); - oprot.writeString(_iter1674.getValue()); + oprot.writeString(_iter1682.getKey()); + oprot.writeString(_iter1682.getValue()); } oprot.writeMapEnd(); } @@ -143018,10 +144155,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) 
{ { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1675 : struct.success.entrySet()) + for (Map.Entry _iter1683 : struct.success.entrySet()) { - oprot.writeString(_iter1675.getKey()); - oprot.writeString(_iter1675.getValue()); + oprot.writeString(_iter1683.getKey()); + oprot.writeString(_iter1683.getValue()); } } } @@ -143036,15 +144173,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1676 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1676.size); - String _key1677; - String _val1678; - for (int _i1679 = 0; _i1679 < _map1676.size; ++_i1679) + org.apache.thrift.protocol.TMap _map1684 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1684.size); + String _key1685; + String _val1686; + for (int _i1687 = 0; _i1687 < _map1684.size; ++_i1687) { - _key1677 = iprot.readString(); - _val1678 = iprot.readString(); - struct.success.put(_key1677, _val1678); + _key1685 = iprot.readString(); + _val1686 = iprot.readString(); + struct.success.put(_key1685, _val1686); } } struct.setSuccessIsSet(true); @@ -143639,15 +144776,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1680 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1680.size); - String _key1681; - String _val1682; - for (int _i1683 = 0; _i1683 < _map1680.size; ++_i1683) + org.apache.thrift.protocol.TMap _map1688 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1688.size); + String _key1689; + String _val1690; + for (int _i1691 = 0; _i1691 < _map1688.size; ++_i1691) { - _key1681 = iprot.readString(); - _val1682 = iprot.readString(); - struct.part_vals.put(_key1681, _val1682); + _key1689 = iprot.readString(); + _val1690 = iprot.readString(); + struct.part_vals.put(_key1689, _val1690); } iprot.readMapEnd(); } @@ -143691,10 +144828,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1684 : struct.part_vals.entrySet()) + for (Map.Entry _iter1692 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1684.getKey()); - oprot.writeString(_iter1684.getValue()); + oprot.writeString(_iter1692.getKey()); + oprot.writeString(_iter1692.getValue()); } oprot.writeMapEnd(); } @@ -143745,10 +144882,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1685 : struct.part_vals.entrySet()) + for (Map.Entry _iter1693 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1685.getKey()); - oprot.writeString(_iter1685.getValue()); + oprot.writeString(_iter1693.getKey()); + oprot.writeString(_iter1693.getValue()); } } } @@ -143771,15 +144908,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap 
_map1686 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1686.size); - String _key1687; - String _val1688; - for (int _i1689 = 0; _i1689 < _map1686.size; ++_i1689) + org.apache.thrift.protocol.TMap _map1694 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1694.size); + String _key1695; + String _val1696; + for (int _i1697 = 0; _i1697 < _map1694.size; ++_i1697) { - _key1687 = iprot.readString(); - _val1688 = iprot.readString(); - struct.part_vals.put(_key1687, _val1688); + _key1695 = iprot.readString(); + _val1696 = iprot.readString(); + struct.part_vals.put(_key1695, _val1696); } } struct.setPart_valsIsSet(true); @@ -145263,15 +146400,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1690 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1690.size); - String _key1691; - String _val1692; - for (int _i1693 = 0; _i1693 < _map1690.size; ++_i1693) + org.apache.thrift.protocol.TMap _map1698 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1698.size); + String _key1699; + String _val1700; + for (int _i1701 = 0; _i1701 < _map1698.size; ++_i1701) { - _key1691 = iprot.readString(); - _val1692 = iprot.readString(); - struct.part_vals.put(_key1691, _val1692); + _key1699 = iprot.readString(); + _val1700 = iprot.readString(); + struct.part_vals.put(_key1699, _val1700); } iprot.readMapEnd(); } @@ -145315,10 +146452,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1694 : struct.part_vals.entrySet()) + for (Map.Entry _iter1702 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1694.getKey()); - oprot.writeString(_iter1694.getValue()); + oprot.writeString(_iter1702.getKey()); + oprot.writeString(_iter1702.getValue()); } oprot.writeMapEnd(); } @@ -145369,10 +146506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1695 : struct.part_vals.entrySet()) + for (Map.Entry _iter1703 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1695.getKey()); - oprot.writeString(_iter1695.getValue()); + oprot.writeString(_iter1703.getKey()); + oprot.writeString(_iter1703.getValue()); } } } @@ -145395,15 +146532,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1696 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1696.size); - String _key1697; - String _val1698; - for (int _i1699 = 0; _i1699 < _map1696.size; ++_i1699) + org.apache.thrift.protocol.TMap _map1704 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1704.size); + 
String _key1705; + String _val1706; + for (int _i1707 = 0; _i1707 < _map1704.size; ++_i1707) { - _key1697 = iprot.readString(); - _val1698 = iprot.readString(); - struct.part_vals.put(_key1697, _val1698); + _key1705 = iprot.readString(); + _val1706 = iprot.readString(); + struct.part_vals.put(_key1705, _val1706); } } struct.setPart_valsIsSet(true); @@ -170267,13 +171404,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1700 = iprot.readListBegin(); - struct.success = new ArrayList(_list1700.size); - String _elem1701; - for (int _i1702 = 0; _i1702 < _list1700.size; ++_i1702) + org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); + struct.success = new ArrayList(_list1708.size); + String _elem1709; + for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) { - _elem1701 = iprot.readString(); - struct.success.add(_elem1701); + _elem1709 = iprot.readString(); + struct.success.add(_elem1709); } iprot.readListEnd(); } @@ -170308,9 +171445,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1703 : struct.success) + for (String _iter1711 : struct.success) { - oprot.writeString(_iter1703); + oprot.writeString(_iter1711); } oprot.writeListEnd(); } @@ -170349,9 +171486,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1704 : struct.success) + for (String _iter1712 : struct.success) { - oprot.writeString(_iter1704); + oprot.writeString(_iter1712); } } } @@ -170366,13 +171503,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1705.size); - String _elem1706; - for (int _i1707 = 0; _i1707 < _list1705.size; ++_i1707) + org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1713.size); + String _elem1714; + for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) { - _elem1706 = iprot.readString(); - struct.success.add(_elem1706); + _elem1714 = iprot.readString(); + struct.success.add(_elem1714); } } struct.setSuccessIsSet(true); @@ -174427,13 +175564,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); - struct.success = new ArrayList(_list1708.size); - String _elem1709; - for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) + org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); + struct.success = new ArrayList(_list1716.size); + String _elem1717; + for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) { - _elem1709 = iprot.readString(); - struct.success.add(_elem1709); + _elem1717 = iprot.readString(); + struct.success.add(_elem1717); } iprot.readListEnd(); } @@ -174468,9 +175605,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1711 : struct.success) + for (String _iter1719 : struct.success) { - oprot.writeString(_iter1711); + oprot.writeString(_iter1719); } oprot.writeListEnd(); } @@ -174509,9 +175646,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1712 : struct.success) + for (String _iter1720 : struct.success) { - oprot.writeString(_iter1712); + oprot.writeString(_iter1720); } } } @@ -174526,13 +175663,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1713.size); - String _elem1714; - for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) + org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1721.size); + String _elem1722; + for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) { - _elem1714 = iprot.readString(); - struct.success.add(_elem1714); + _elem1722 = iprot.readString(); + struct.success.add(_elem1722); } } struct.setSuccessIsSet(true); @@ -177823,14 +178960,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); - struct.success = new ArrayList(_list1716.size); - Role _elem1717; - for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) + org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); + struct.success = new ArrayList(_list1724.size); + Role _elem1725; + for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) { - _elem1717 = new Role(); - _elem1717.read(iprot); - struct.success.add(_elem1717); + _elem1725 = new Role(); + _elem1725.read(iprot); + struct.success.add(_elem1725); } iprot.readListEnd(); } @@ -177865,9 +179002,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1719 : struct.success) + for (Role _iter1727 : struct.success) { - _iter1719.write(oprot); + _iter1727.write(oprot); } oprot.writeListEnd(); } @@ -177906,9 +179043,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1720 : struct.success) + for (Role _iter1728 : struct.success) { - _iter1720.write(oprot); + _iter1728.write(oprot); } } } @@ -177923,14 +179060,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1721.size); - Role 
_elem1722; - for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) + org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1729.size); + Role _elem1730; + for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) { - _elem1722 = new Role(); - _elem1722.read(iprot); - struct.success.add(_elem1722); + _elem1730 = new Role(); + _elem1730.read(iprot); + struct.success.add(_elem1730); } } struct.setSuccessIsSet(true); @@ -180935,13 +182072,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1724.size); - String _elem1725; - for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) + org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1732.size); + String _elem1733; + for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) { - _elem1725 = iprot.readString(); - struct.group_names.add(_elem1725); + _elem1733 = iprot.readString(); + struct.group_names.add(_elem1733); } iprot.readListEnd(); } @@ -180977,9 +182114,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1727 : struct.group_names) + for (String _iter1735 : struct.group_names) { - oprot.writeString(_iter1727); + oprot.writeString(_iter1735); } oprot.writeListEnd(); } @@ -181022,9 +182159,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1728 : struct.group_names) + for (String _iter1736 : struct.group_names) { - oprot.writeString(_iter1728); + oprot.writeString(_iter1736); } } } @@ -181045,13 +182182,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1729.size); - String _elem1730; - for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) + org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1737.size); + String _elem1738; + for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) { - _elem1730 = iprot.readString(); - struct.group_names.add(_elem1730); + _elem1738 = iprot.readString(); + struct.group_names.add(_elem1738); } } struct.setGroup_namesIsSet(true); @@ -182509,14 +183646,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); - struct.success = new ArrayList(_list1732.size); - HiveObjectPrivilege _elem1733; - for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) + org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); + struct.success = new ArrayList(_list1740.size); + HiveObjectPrivilege 
_elem1741; + for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) { - _elem1733 = new HiveObjectPrivilege(); - _elem1733.read(iprot); - struct.success.add(_elem1733); + _elem1741 = new HiveObjectPrivilege(); + _elem1741.read(iprot); + struct.success.add(_elem1741); } iprot.readListEnd(); } @@ -182551,9 +183688,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1735 : struct.success) + for (HiveObjectPrivilege _iter1743 : struct.success) { - _iter1735.write(oprot); + _iter1743.write(oprot); } oprot.writeListEnd(); } @@ -182592,9 +183729,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1736 : struct.success) + for (HiveObjectPrivilege _iter1744 : struct.success) { - _iter1736.write(oprot); + _iter1744.write(oprot); } } } @@ -182609,14 +183746,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1737.size); - HiveObjectPrivilege _elem1738; - for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) + org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1745.size); + HiveObjectPrivilege _elem1746; + for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) { - _elem1738 = new HiveObjectPrivilege(); - _elem1738.read(iprot); - struct.success.add(_elem1738); + _elem1746 = new HiveObjectPrivilege(); + _elem1746.read(iprot); + struct.success.add(_elem1746); } } struct.setSuccessIsSet(true); @@ -186563,13 +187700,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1740.size); - String _elem1741; - for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) + org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1748.size); + String _elem1749; + for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) { - _elem1741 = iprot.readString(); - struct.group_names.add(_elem1741); + _elem1749 = iprot.readString(); + struct.group_names.add(_elem1749); } iprot.readListEnd(); } @@ -186600,9 +187737,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1743 : struct.group_names) + for (String _iter1751 : struct.group_names) { - oprot.writeString(_iter1743); + oprot.writeString(_iter1751); } oprot.writeListEnd(); } @@ -186639,9 +187776,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1744 : struct.group_names) + 
for (String _iter1752 : struct.group_names) { - oprot.writeString(_iter1744); + oprot.writeString(_iter1752); } } } @@ -186657,13 +187794,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1745.size); - String _elem1746; - for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) + org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1753.size); + String _elem1754; + for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) { - _elem1746 = iprot.readString(); - struct.group_names.add(_elem1746); + _elem1754 = iprot.readString(); + struct.group_names.add(_elem1754); } } struct.setGroup_namesIsSet(true); @@ -187066,13 +188203,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); - struct.success = new ArrayList(_list1748.size); - String _elem1749; - for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) + org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); + struct.success = new ArrayList(_list1756.size); + String _elem1757; + for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) { - _elem1749 = iprot.readString(); - struct.success.add(_elem1749); + _elem1757 = iprot.readString(); + struct.success.add(_elem1757); } iprot.readListEnd(); } @@ -187107,9 +188244,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1751 : struct.success) + for (String _iter1759 : struct.success) { - oprot.writeString(_iter1751); + oprot.writeString(_iter1759); } oprot.writeListEnd(); } @@ -187148,9 +188285,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1752 : struct.success) + for (String _iter1760 : struct.success) { - oprot.writeString(_iter1752); + oprot.writeString(_iter1760); } } } @@ -187165,13 +188302,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1753.size); - String _elem1754; - for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) + org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1761.size); + String _elem1762; + for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) { - _elem1754 = iprot.readString(); - struct.success.add(_elem1754); + _elem1762 = iprot.readString(); + struct.success.add(_elem1762); } } struct.setSuccessIsSet(true); @@ -192462,13 +193599,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type 
== org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); - struct.success = new ArrayList(_list1756.size); - String _elem1757; - for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) + org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); + struct.success = new ArrayList(_list1764.size); + String _elem1765; + for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) { - _elem1757 = iprot.readString(); - struct.success.add(_elem1757); + _elem1765 = iprot.readString(); + struct.success.add(_elem1765); } iprot.readListEnd(); } @@ -192494,9 +193631,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1759 : struct.success) + for (String _iter1767 : struct.success) { - oprot.writeString(_iter1759); + oprot.writeString(_iter1767); } oprot.writeListEnd(); } @@ -192527,9 +193664,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1760 : struct.success) + for (String _iter1768 : struct.success) { - oprot.writeString(_iter1760); + oprot.writeString(_iter1768); } } } @@ -192541,13 +193678,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1761.size); - String _elem1762; - for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) + org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1769.size); + String _elem1770; + for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) { - _elem1762 = iprot.readString(); - struct.success.add(_elem1762); + _elem1770 = iprot.readString(); + struct.success.add(_elem1770); } } struct.setSuccessIsSet(true); @@ -195577,13 +196714,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); - struct.success = new ArrayList(_list1764.size); - String _elem1765; - for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) + org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); + struct.success = new ArrayList(_list1772.size); + String _elem1773; + for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) { - _elem1765 = iprot.readString(); - struct.success.add(_elem1765); + _elem1773 = iprot.readString(); + struct.success.add(_elem1773); } iprot.readListEnd(); } @@ -195609,9 +196746,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1767 : struct.success) + for (String _iter1775 : struct.success) { - oprot.writeString(_iter1767); + oprot.writeString(_iter1775); } oprot.writeListEnd(); } @@ -195642,9 +196779,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1768 : struct.success) + for (String _iter1776 : struct.success) { - oprot.writeString(_iter1768); + oprot.writeString(_iter1776); } } } @@ -195656,13 +196793,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1769.size); - String _elem1770; - for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) + org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1777.size); + String _elem1778; + for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) { - _elem1770 = iprot.readString(); - struct.success.add(_elem1770); + _elem1778 = iprot.readString(); + struct.success.add(_elem1778); } } struct.setSuccessIsSet(true); @@ -212783,13 +213920,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); - struct.success = new ArrayList(_list1772.size); - String _elem1773; - for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) + org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); + struct.success = new ArrayList(_list1780.size); + String _elem1781; + for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) { - _elem1773 = iprot.readString(); - struct.success.add(_elem1773); + _elem1781 = iprot.readString(); + struct.success.add(_elem1781); } iprot.readListEnd(); } @@ -212815,9 +213952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1775 : struct.success) + for (String _iter1783 : struct.success) { - oprot.writeString(_iter1775); + oprot.writeString(_iter1783); } oprot.writeListEnd(); } @@ -212848,9 +213985,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1776 : struct.success) + for (String _iter1784 : struct.success) { - oprot.writeString(_iter1776); + oprot.writeString(_iter1784); } } } @@ -212862,13 +213999,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1777.size); - String _elem1778; - for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) + org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1785.size); + String _elem1786; + for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) { - _elem1778 = iprot.readString(); - struct.success.add(_elem1778); + _elem1786 = iprot.readString(); + 
struct.success.add(_elem1786); } } struct.setSuccessIsSet(true); @@ -249754,14 +250891,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); - struct.success = new ArrayList(_list1780.size); - SchemaVersion _elem1781; - for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) + org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); + struct.success = new ArrayList(_list1788.size); + SchemaVersion _elem1789; + for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) { - _elem1781 = new SchemaVersion(); - _elem1781.read(iprot); - struct.success.add(_elem1781); + _elem1789 = new SchemaVersion(); + _elem1789.read(iprot); + struct.success.add(_elem1789); } iprot.readListEnd(); } @@ -249805,9 +250942,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1783 : struct.success) + for (SchemaVersion _iter1791 : struct.success) { - _iter1783.write(oprot); + _iter1791.write(oprot); } oprot.writeListEnd(); } @@ -249854,9 +250991,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1784 : struct.success) + for (SchemaVersion _iter1792 : struct.success) { - _iter1784.write(oprot); + _iter1792.write(oprot); } } } @@ -249874,14 +251011,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1785.size); - SchemaVersion _elem1786; - for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) + org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1793.size); + SchemaVersion _elem1794; + for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) { - _elem1786 = new SchemaVersion(); - _elem1786.read(iprot); - struct.success.add(_elem1786); + _elem1794 = new SchemaVersion(); + _elem1794.read(iprot); + struct.success.add(_elem1794); } } struct.setSuccessIsSet(true); @@ -258424,14 +259561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); - struct.success = new ArrayList(_list1788.size); - RuntimeStat _elem1789; - for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) + org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); + struct.success = new ArrayList(_list1796.size); + RuntimeStat _elem1797; + for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) { - _elem1789 = new RuntimeStat(); - _elem1789.read(iprot); - struct.success.add(_elem1789); + _elem1797 = new RuntimeStat(); + _elem1797.read(iprot); + struct.success.add(_elem1797); } iprot.readListEnd(); } @@ -258466,9 +259603,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1791 : struct.success) + for (RuntimeStat _iter1799 : struct.success) { - _iter1791.write(oprot); + _iter1799.write(oprot); } oprot.writeListEnd(); } @@ -258507,9 +259644,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1792 : struct.success) + for (RuntimeStat _iter1800 : struct.success) { - _iter1792.write(oprot); + _iter1800.write(oprot); } } } @@ -258524,14 +259661,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1793.size); - RuntimeStat _elem1794; - for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) + org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1801.size); + RuntimeStat _elem1802; + for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803) { - _elem1794 = new RuntimeStat(); - _elem1794.read(iprot); - struct.success.add(_elem1794); + _elem1802 = new RuntimeStat(); + _elem1802.read(iprot); + struct.success.add(_elem1802); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 205c867db1..cb065f0899 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2343,6 +2343,27 @@ public boolean tableExists(String catName, String dbName, String tableName) thro isClientFilterEnabled, filterHook, catName, db_name, tbl_name, partNames); } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, short maxParts) + throws MetaException, TException, NoSuchObjectException { + + PartitionsByExprRequest req = new PartitionsByExprRequest( + dbName, tblName, ByteBuffer.wrap(exprBytes)); + if (defaultPartName != null) { + req.setDefaultPartitionName(defaultPartName); + } + if (maxParts >= 0) { + req.setMaxParts(maxParts); + } + if (order != null) { + req.setOrder(order); + } + req.setCatName(catName); + return FilterUtils.filterPartitionNamesIfEnabled(isClientFilterEnabled, filterHook, catName, + dbName, tblName, client.get_partition_names_req(req)); + } + @Override public int getNumPartitionsByFilter(String db_name, String tbl_name, String filter) throws TException { diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b58b1e4a07..f4eec63afb 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ 
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1212,6 +1212,25 @@ PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tabl
       List<String> part_vals, int max_parts)
       throws MetaException, TException, NoSuchObjectException;
 
+  /**
+   * Get a list of partition names matching the specified filter, returned in the specified order.
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tblName table name.
+   * @param defaultPartName default partition name.
+   * @param exprBytes expression, serialized from ExprNodeDesc.
+   * @param order how the matching partition names should be ordered.
+   * @param maxParts maximum number of partition names to return, or -1 to return all that are
+   *                 found.
+   * @return list of matching partition names.
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error.
+   * @throws NoSuchObjectException no such table.
+   */
+  List<String> listPartitionNames(String catName, String dbName, String tblName,
+      String defaultPartName, byte[] exprBytes, String order, short maxParts)
+      throws MetaException, TException, NoSuchObjectException;
+
   /**
    * Get a list of partition values
    * @param request request
diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 098ddec5dc..82835e6d68 100644
--- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -763,8 +763,9 @@ struct PartitionsByExprRequest {
   2: required string tblName,
   3: required binary expr,
   4: optional string defaultPartitionName,
-  5: optional i16 maxParts=-1
-  6: optional string catName
+  5: optional i16 maxParts=-1,
+  6: optional string catName,
+  7: optional string order
 }
 
 struct TableStatsResult {
@@ -2238,6 +2239,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
       2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+  list<string> get_partition_names_req(1:PartitionsByExprRequest req)
+                       throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
   // get the partitions matching the given partition filter
   list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
       3:string filter, 4:i16 max_parts=-1)
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 77d34047a4..a358a95894 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -6220,6 +6220,31 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
     return ret;
   }
 
+  @Override
+  public List<String> get_partition_names_req(PartitionsByExprRequest req)
+      throws MetaException, NoSuchObjectException, TException {
+    String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+    String dbName = req.getDbName(), tblName = req.getTblName();
+    startTableFunction("get_partition_names_req", catName,
+        dbName, tblName);
+    fireReadTablePreEvent(catName, dbName, tblName);
+    List<String> ret = null;
+    Exception ex = null;
+    try {
+      authorizeTableForPartitionMetadata(catName, dbName, tblName);
+      ret = getMS().listPartitionNames(catName, dbName, tblName,
+          req.getDefaultPartitionName(), req.getExpr(), req.getOrder(), req.getMaxParts());
+      ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled,
+          filterHook, catName, dbName, tblName, ret);
+    } catch (Exception e) {
+      ex = e;
+      rethrowException(e);
+    } finally {
+      endFunction("get_partition_names_req", ret != null, ex, tblName);
+    }
+    return ret;
+  }
+
   @Override
   public List<String> partition_name_to_vals(String part_name) throws TException {
     if (part_name.length() == 0) {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index d1558876f1..ec12d2c647 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -32,6 +32,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -496,6 +497,146 @@ public Database getDatabase(String catName, String dbName) throws MetaException{
         queryParams, pms.toArray(), queryText);
   }
 
+  public List<String> getPartitionNamesViaSql(SqlFilterForPushdown filter,
+      String defaultPartName, String order, Integer max) throws MetaException {
+    String catName = filter.table.isSetCatName() ? filter.table.getCatName() :
+        DEFAULT_CATALOG_NAME;
+    if (filter.joins.isEmpty()) {
+      int psize = filter.table.getPartitionKeysSize();
+      for (int i = 0; i < psize; i++) {
+        filter.joins.add(null);
+      }
+    }
+    Map<Integer, String[]> orderMap = new LinkedHashMap<>();
+    if (order != null) {
+      String[] parts = order.split(":");
+      String[] poses = parts[0].split(",");
+      if (poses.length != parts[1].length()) {
+        throw new MetaException("The number of partition key positions and sort"
+            + " directions do not match, order: " + order);
+      }
+
+      for (int i = 0; i < poses.length; i++) {
+        int pos = Integer.parseInt(poses[i]);
+        String sort = ('+' == parts[1].charAt(i)) ? "ASC" : "DESC";
+        FieldSchema partitionKey = filter.table.getPartitionKeys().get(pos);
+        orderMap.put(pos, new String[]{sort, partitionKey.getType()});
+      }
+    }
+    boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType);
+    String defaultPartitionName = (defaultPartName == null ||
+        defaultPartName.isEmpty()) ? this.defaultPartName : defaultPartName;
+
+    return getPartitionNamesViaSqlInternal(catName, filter, defaultPartitionName,
+        orderMap, max, dbHasJoinCastBug);
+  }
+
+  private List<String> getPartitionNamesViaSqlInternal(String catName, SqlFilterForPushdown filter,
+      String defaultPartName, Map<Integer, String[]> orderMap, Integer max, boolean dbHasJoinCastBug)
+      throws MetaException {
+    String dbName = filter.table.getDbName(), tblName = filter.table.getTableName(),
+        sqlFilter = filter.filter;
+    List<Object> paramsForFilter = filter.params;
+    List<String> joins = filter.joins;
+    StringBuilder orderColumns = new StringBuilder(), orderClause = new StringBuilder();
+    int i = 0;
+    List<Object> paramsForOrder = new ArrayList<>();
+    for (Map.Entry<Integer, String[]> entry : orderMap.entrySet()) {
+      int partColIndex = entry.getKey();
+      String orderAlias = "ODR" + (i++);
+      String tableValue, tableAlias;
+      if (joins.get(partColIndex) == null) {
+        tableAlias = "ORDER" + partColIndex;
+        joins.set(partColIndex, "inner join " + PARTITION_KEY_VALS + " \"" + tableAlias +
+            "\" on \"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" +
+            " and \"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex);
+        tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" ";
+      } else {
+        tableAlias = "FILTER" + partColIndex;
+        tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" ";
+      }
+
+      String tableColumn = tableValue;
+      PartitionFilterGenerator.FilterType type =
+          PartitionFilterGenerator.FilterType.fromType(entry.getValue()[1]);
+      if (type == PartitionFilterGenerator.FilterType.Date) {
+        if (dbType == DatabaseProduct.ORACLE) {
+          tableValue = "TO_DATE(" + tableValue + ", 'YYYY-MM-DD')";
+        } else {
+          tableValue = "cast(" + tableValue + " as date)";
+        }
+      } else if (type == PartitionFilterGenerator.FilterType.Integral) {
+        tableValue = "CAST(" + tableColumn + " AS decimal(21,0))";
+      }
+      String tableValue0 = tableValue;
+      tableValue = "(case when " + tableColumn + " <> ?";
+      paramsForOrder.add(defaultPartName);
+      if (dbHasJoinCastBug) {
+        tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " +
+            DBS + ".\"CTLG_NAME\" = ? and " +
+            "\"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and " +
+            "\"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex);
+        paramsForOrder.add(tblName.toLowerCase());
+        paramsForOrder.add(dbName.toLowerCase());
+        paramsForOrder.add(catName.toLowerCase());
+      }
+      tableValue += " then " + tableValue0 + " else null end) AS \"" + orderAlias + "\"";
+      orderColumns.append(tableValue).append(",");
+      orderClause.append("\"").append(orderAlias).append("\" ")
+          .append(entry.getValue()[0]).append(",");
+    }
+
+    for (int j = 0; j < joins.size(); j++) {
+      if (joins.get(j) == null) {
+        joins.remove(j--);
+      }
+    }
+    if (orderClause.length() > 0) {
+      orderClause.setLength(orderClause.length() - 1);
+      orderColumns.setLength(orderColumns.length() - 1);
+    }
+
+    String orderCls = " order by " +
+        (orderClause.length() > 0 ? orderClause.toString() : "\"PART_NAME\" asc");
+    String columns = orderColumns.length() > 0 ? ", " + orderColumns.toString() : "";
+    String queryText =
+        "select " + PARTITIONS + ".\"PART_NAME\"" + columns + " from " + PARTITIONS + "" +
+        " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" " +
+        " and " + TBLS + ".\"TBL_NAME\" = ? " +
+        " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " +
+        " and " + DBS + ".\"NAME\" = ? " +
+        join(joins, ' ') +
+        " where " + DBS + ".\"CTLG_NAME\" = ? " +
"" : (" and " + sqlFilter)) + orderCls; + + Object[] params = new Object[paramsForFilter.size() + paramsForOrder.size() + 3]; + i = 0; + for (; i < paramsForOrder.size(); i++) { + params[i] = paramsForOrder.get(i); + } + params[i] = tblName; + params[i+1] = dbName; + params[i+2] = catName; + for (int j = 0; j < paramsForFilter.size(); j++) { + params[i + j + 3] = paramsForFilter.get(j); + } + + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + List partNames = new LinkedList(); + int limit = (max == null ? -1 : max); + try { + List sqlResult = executeWithArray(query, params, queryText, limit); + for (Object result : sqlResult) { + String partName = !columns.isEmpty() ? + String.valueOf(((Object[]) result)[0]) : String.valueOf(result); + partNames.add(partName); + } + } finally { + query.closeAll(); + } + return partNames; + } + /** * Gets partitions by using direct SQL queries. * @param catName Metastore catalog name. @@ -648,6 +789,15 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ private final List joins = new ArrayList<>(); private String filter; private Table table; + // Should remove null values in joins + private boolean compactJoins; + SqlFilterForPushdown() { + this.compactJoins = true; + } + SqlFilterForPushdown(Table table, boolean compactJoins) { + this.table = table; + this.compactJoins = compactJoins; + } } public boolean generateSqlFilterForPushdown( @@ -662,7 +812,7 @@ public boolean generateSqlFilterForPushdown(Table table, ExpressionTree tree, St result.table = table; result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params, result.joins, dbHasJoinCastBug, ((defaultPartitionName == null) ? defaultPartName : defaultPartitionName), - dbType, schema); + dbType, schema, result.compactJoins); return result.filter != null; } @@ -1031,7 +1181,7 @@ private PartitionFilterGenerator(Table table, List params, List */ private static String generateSqlFilter(Table table, ExpressionTree tree, List params, List joins, boolean dbHasJoinCastBug, String defaultPartName, - DatabaseProduct dbType, String schema) throws MetaException { + DatabaseProduct dbType, String schema, boolean compactJoins) throws MetaException { assert table != null; if (tree == null) { // consistent with other APIs like makeExpressionTree, null is returned to indicate that @@ -1050,9 +1200,13 @@ private static String generateSqlFilter(Table table, ExpressionTree tree, List listPartitionNames(final String catName, final String dbName, final String tblName, + final String defaultPartName, final byte[] exprBytes, + final String order, final short maxParts) throws MetaException, NoSuchObjectException { + + return new GetListHelper(catName, dbName, tblName, true, false) { + private List getPartitionNamesByExprNoTxn(Table table) throws MetaException { + SqlFilterForPushdown filter = new SqlFilterForPushdown(table, false); + List result = directSql.getPartitionNamesViaSql(filter, defaultPartName, order, -1); + expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), exprBytes, + getDefaultPartitionName(defaultPartName), result); + if (maxParts >=0 && result.size() > maxParts) { + result = result.subList(0, maxParts); + } + return result; + } + @Override + protected List getSqlResult(GetHelper> ctx) throws MetaException { + SqlFilterForPushdown filter = new SqlFilterForPushdown(ctx.getTable(), false); + if (exprBytes.length == 1 && exprBytes[0] == -1) { + return directSql.getPartitionNamesViaSql(filter, defaultPartName, order, 
+              (int)maxParts);
+        }
+
+        List<String> partNames = null;
+        final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, exprBytes,
+            getDefaultPartitionName(defaultPartName));
+        // If we have some sort of expression tree, try SQL filter pushdown.
+        if (exprTree != null) {
+          if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, defaultPartName, filter)) {
+            partNames = directSql.getPartitionNamesViaSql(filter, defaultPartName, order, (int)maxParts);
+          }
+        }
+        if (partNames == null) {
+          partNames = getPartitionNamesByExprNoTxn(ctx.getTable());
+        }
+        return partNames;
+      }
+      @Override
+      protected List<String> getJdoResult(
+          GetHelper<List<String>> ctx) throws MetaException, NoSuchObjectException {
+        return Collections.emptyList();
+      }
+    }.run(true);
+  }
+
   private String extractPartitionKey(FieldSchema key, List<FieldSchema> pkeys) {
     StringBuilder buffer = new StringBuilder(256);
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index c334421adf..3f498c8473 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -482,6 +482,24 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre
   List<String> listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException;
 
+  /**
+   * Get a partial or complete list of names for partitions of a table,
+   * matching the given filter expression and returned in the given order.
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tblName table name.
+   * @param defaultPartName default partition name.
+   * @param exprBytes filter expression, serialized from ExprNodeDesc.
+   * @param order how the matching partition names should be ordered.
+   * @param maxParts maximum number of partitions to retrieve, -1 for all.
+   * @return list of partition names.
+   * @throws MetaException there was an error accessing the RDBMS
+   * @throws NoSuchObjectException no such table.
+   */
+  List<String> listPartitionNames(String catName, String dbName, String tblName,
+      String defaultPartName, byte[] exprBytes, String order,
+      short maxParts) throws MetaException, NoSuchObjectException;
+
   /**
    * Get a list of partition values as one big struct.
    * @param catName catalog name.
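For reference, the `order` string threaded through these APIs down to MetaStoreDirectSql.getPartitionNamesViaSql has the shape "positions:directions": a comma-separated list of zero-based partition-key indexes before the colon, and one '+' (ASC) or '-' (DESC) character per listed index after it, which is why the parser rejects specs where the two halves differ in length. Below is a minimal sketch of building such a spec; buildOrderSpec is a hypothetical helper name, not part of this patch.

import java.util.StringJoiner;

public class OrderSpecExample {

  // Builds an order spec such as "0,1:-+" from parallel arrays of partition-key
  // positions and ascending flags; getPartitionNamesViaSql() splits the result
  // on ':' and maps '+' to ASC and '-' to DESC, one character per position.
  static String buildOrderSpec(int[] keyPositions, boolean[] ascending) {
    if (keyPositions.length != ascending.length) {
      throw new IllegalArgumentException("positions and directions must align");
    }
    StringJoiner positions = new StringJoiner(",");
    StringBuilder directions = new StringBuilder();
    for (int i = 0; i < keyPositions.length; i++) {
      positions.add(Integer.toString(keyPositions[i]));
      directions.append(ascending[i] ? '+' : '-');
    }
    return positions + ":" + directions;
  }

  public static void main(String[] args) {
    // For a table partitioned by (dt, country): dt DESC, country ASC.
    System.out.println(buildOrderSpec(new int[]{0, 1}, new boolean[]{false, true}));
    // prints: 0,1:-+
  }
}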
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index f31cc5d7a1..8d28c1fd5a 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -1518,6 +1518,12 @@ private void validateTableType(Table tbl) {
     return partitionNames;
   }
 
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, String defaultPartName,
+      byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName,
       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
       List<FieldSchema> order, long maxParts) throws MetaException {
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index f7032b93d1..e31dc064c1 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena
     return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
   }
 
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, String defaultPartName,
+      byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException {
+    return objectStore.listPartitionNames(catName, dbName, tblName,
+        defaultPartName, exprBytes, order, maxParts);
+  }
+
   @Override
   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
       String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter,
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index bea7e9572b..89acdcc55b 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena
     return Collections.emptyList();
   }
 
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, String defaultPartName,
+      byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException {
+
+    return Collections.emptyList();
+  }
+
   @Override
   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
       String tbl_name, List<FieldSchema> cols,
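The exprBytes parameter is documented above only as "expression, serialized from ExprNodeDesc". Elsewhere in Hive, partitions-by-expr metastore calls carry a Kryo-serialized ExprNodeGenericFuncDesc; assuming the same convention holds for this new API, a caller on the ql side could produce the bytes roughly as in this sketch (not taken from the patch):

import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

public final class PartitionExprBytes {

  private PartitionExprBytes() {
  }

  // Serializes a boolean partition-filter expression into the byte[] form that
  // the new listPartitionNames/get_partition_names_req calls expect (assumption:
  // the same Kryo encoding used by other partitions-by-expr requests).
  public static byte[] toExprBytes(ExprNodeGenericFuncDesc partitionFilter) {
    return SerializationUtilities.serializeExpressionToKryo(partitionFilter);
  }
}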
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index fc071f9a20..3534539afb 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1416,6 +1416,14 @@ public Partition getPartition(String db_name, String tbl_name,
     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
   }
 
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tblName,
+      String defaultPartName, byte[] exprBytes, String order,
+      short maxParts) throws MetaException, TException, NoSuchObjectException {
+
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
       throws MetaException, TException, NoSuchObjectException {
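End to end, the client method added in HiveMetaStoreClient can be driven as in the sketch below. The catalog/database/table names and the helper itself are illustrative, and the single byte -1 mirrors the sentinel that ObjectStore's getSqlResult treats as "no filter expression", so this call lists the ten partition names sorted by the first partition key descending.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public final class ListPartitionNamesExample {

  // Sentinel recognized by ObjectStore.getSqlResult() as "no filter expression",
  // which takes the direct-SQL fast path.
  private static final byte[] NO_FILTER = new byte[] {-1};

  // Returns the ten latest partition names, assuming the first partition key is
  // the one to sort on; "0:-" means key position 0, descending.
  public static List<String> latestPartitions(IMetaStoreClient client) throws Exception {
    return client.listPartitionNames("hive", "web", "logs",
        "__HIVE_DEFAULT_PARTITION__", NO_FILTER, "0:-", (short) 10);
  }

  private ListPartitionNamesExample() {
  }
}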