diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 4984138ef8..7e0ce0734b 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -394,6 +394,15 @@ public Table alterTable(String catName, String dbName, String name, Table newTab return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } + @Override + public List<String> listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, + short maxParts) throws MetaException, NoSuchObjectException { + + return objectStore.listPartitionNames(catName, dbName, tblName, + defaultPartName, exprBytes, order, maxParts); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List<FieldSchema> cols, diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties index c55f8db61a..14580f6d23 100644 --- itests/src/test/resources/testconfiguration.properties +++ itests/src/test/resources/testconfiguration.properties @@ -1686,6 +1686,7 @@ minillaplocal.query.files=\ show_functions.q,\ show_materialized_views.q,\ show_partitions.q,\ + show_partitions2.q,\ show_roles.q,\ show_tables.q,\ show_tablestatus.q,\ diff --git parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index b03b0989b8..74eaccdd63 100644 --- parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -1674,7 +1674,7 @@ showStatement | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWCOLUMNS tableName (TOK_FROM $db_name)? showStmtIdentifier?) | KW_SHOW KW_FUNCTIONS (KW_LIKE showFunctionIdentifier)? -> ^(TOK_SHOWFUNCTIONS KW_LIKE? showFunctionIdentifier?) - | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?) + | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? whereClause? orderByClause? limitClause? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec? whereClause? orderByClause? limitClause?)
| KW_SHOW KW_CREATE ( (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) db_name=identifier -> ^(TOK_SHOW_CREATEDATABASE $db_name) | diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java index 2f659e6382..a4158d6cf2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java @@ -18,12 +18,20 @@ package org.apache.hadoop.hive.ql.ddl.table.partition.show; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -32,7 +40,20 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveTableName; +import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.type.ExprNodeTypeCheck; +import org.apache.hadoop.hive.ql.parse.type.TypeCheckCtx; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; /** * Analyzer for show partition commands. 
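The grammar change above makes whereClause, orderByClause and limitClause optional trailers of SHOW PARTITIONS, so they arrive as extra children of TOK_SHOWPARTITIONS. A minimal sketch of parsing such a command and dispatching on each child's token type, mirroring what the analyzer hunks below do (the class name and query string are illustrative; ParseUtils.parse, ASTNode and the HiveParser token constants are the same ones this patch uses):

```java
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseUtils;

public class ShowPartitionsAstSketch {
  public static void main(String[] args) throws Exception {
    // WHERE / ORDER BY / LIMIT arrive as sibling children of TOK_SHOWPARTITIONS
    ASTNode command = ParseUtils.parse(
        "show partitions databaseFoo.tableBar where ds > '2010-03-03' order by ds desc limit 5");
    for (int i = 0; i < command.getChildCount(); i++) {
      switch (((ASTNode) command.getChild(i)).getType()) {
        case HiveParser.TOK_WHERE:   System.out.println("filter -> getShowPartitionsFilter"); break;
        case HiveParser.TOK_ORDERBY: System.out.println("order  -> getShowPartitionsOrder");  break;
        case HiveParser.TOK_LIMIT:   System.out.println("limit  -> getShowPartitionsLimit");  break;
        default: break; // remaining children: table name, optional partitionSpec
      }
    }
  }
}
```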
@@ -56,11 +77,175 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Table table = getTable(HiveTableName.of(tableName)); inputs.add(new ReadEntity(table)); - ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); + ExprNodeDesc filter = getShowPartitionsFilter(table, ast); + String orderBy = getShowPartitionsOrder(table, ast); + short limit = getShowPartitionsLimit(ast); + + ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), + partSpec, filter, orderBy, limit); Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); rootTasks.add(task); task.setFetchSource(true); setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA)); } + + + @VisibleForTesting + ExprNodeDesc getShowPartitionsFilter(Table table, ASTNode command) throws SemanticException { + ExprNodeDesc showFilter = null; + for (int childIndex = 0; childIndex < command.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode)command.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_WHERE) { + RowResolver rwsch = new RowResolver(); + Map<String, String> colTypes = new HashMap<String, String>(); + for (FieldSchema fs : table.getPartCols()) { + rwsch.put(table.getTableName(), fs.getName(), new ColumnInfo(fs.getName(), + TypeInfoFactory.stringTypeInfo, null, true)); + colTypes.put(fs.getName().toLowerCase(), fs.getType()); + } + TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch); + ASTNode conds = (ASTNode) astChild.getChild(0); + Map<ASTNode, ExprNodeDesc> nodeOutputs = ExprNodeTypeCheck.genExprNode(conds, tcCtx); + ExprNodeDesc target = nodeOutputs.get(conds); + if (!(target instanceof ExprNodeGenericFuncDesc) || !target.getTypeInfo().equals( + TypeInfoFactory.booleanTypeInfo)) { + String errorMsg = tcCtx.getError() != null ? ". " + tcCtx.getError() : ""; + throw new SemanticException("Not a filter expr: " + + (target == null ?
"null" : target.getExprString()) + errorMsg); + } + + showFilter = replaceDefaultPartNameAndCastType(target, colTypes, + HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME)); + } + } + return showFilter; + } + + private ExprNodeDesc replaceDefaultPartNameAndCastType(ExprNodeDesc nodeDesc, + Map<String, String> colTypes, String defaultPartName) throws SemanticException { + if (!(nodeDesc instanceof ExprNodeGenericFuncDesc)) { + return nodeDesc; + } + ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) nodeDesc; + if (FunctionRegistry.isOpAnd(funcDesc) || FunctionRegistry.isOpOr(funcDesc)) { + List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>(); + for (ExprNodeDesc child : funcDesc.getChildren()) { + newChildren.add(replaceDefaultPartNameAndCastType(child, colTypes, defaultPartName)); + } + funcDesc.setChildren(newChildren); + return funcDesc; + } + + List<ExprNodeDesc> children = funcDesc.getChildren(); + int colIdx = -1, constIdx = -1; + for (int i = 0; i < children.size(); i++) { + ExprNodeDesc child = children.get(i); + if (child instanceof ExprNodeColumnDesc) { + String col = ((ExprNodeColumnDesc)child).getColumn().toLowerCase(); + String type = colTypes.get(col); + if (!type.equals(child.getTypeString())) { + child.setTypeInfo(TypeInfoFactory.getPrimitiveTypeInfo(type)); + } + colIdx = i; + } else if (child instanceof ExprNodeConstantDesc) { + constIdx = i; + } + } + + if (funcDesc.getGenericUDF() instanceof GenericUDFBaseCompare && children.size() == 2 && + colIdx > -1 && constIdx > -1) { + ExprNodeConstantDesc constantDesc = (ExprNodeConstantDesc)children.get(constIdx); + ExprNodeColumnDesc columnDesc = (ExprNodeColumnDesc)children.get(colIdx); + Object val = constantDesc.getValue(); + boolean isDefaultPartitionName = defaultPartName.equals(val); + String type = colTypes.get(columnDesc.getColumn().toLowerCase()); + PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type); + if (!isDefaultPartitionName) { + if (!constantDesc.getTypeString().equals(type)) { + Object converted = ObjectInspectorConverters.getConverter( + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(constantDesc.getTypeInfo()), + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti)) + .convert(val); + if (converted == null) { + throw new SemanticException("Cannot convert to " + type + " from " + + constantDesc.getTypeString() + ", value: " + val); + } + ExprNodeConstantDesc newConstantDesc = new ExprNodeConstantDesc(pti, converted); + children.set(constIdx, newConstantDesc); + } + } else { + GenericUDF originalOp = funcDesc.getGenericUDF(); + String fnName; + if (FunctionRegistry.isEq(originalOp)) { + fnName = "isnull"; + } else if (FunctionRegistry.isNeq(originalOp)) { + fnName = "isnotnull"; + } else { + throw new SemanticException( + "Only '=' and '!=' are allowed for the default partition, function: " + originalOp.getUdfName()); + } + funcDesc = PartitionUtils.makeUnaryPredicate(fnName, columnDesc); + } + } + + return funcDesc; + } + + private String getShowPartitionsOrder(Table table, ASTNode command) throws SemanticException { + String orderBy = null; + for (int childIndex = 0; childIndex < command.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode) command.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_ORDERBY) { + Map<String, Integer> poses = new HashMap<String, Integer>(); + RowResolver rwsch = new RowResolver(); + for (int i = 0; i < table.getPartCols().size(); i++) { + FieldSchema fs = table.getPartCols().get(i); + rwsch.put(table.getTableName(), fs.getName(), new ColumnInfo(fs.getName(), +
TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()), null, true)); + poses.put(fs.getName().toLowerCase(), i); + } + TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch); + + StringBuilder colIndices = new StringBuilder(); + StringBuilder order = new StringBuilder(); + int ccount = astChild.getChildCount(); + for (int i = 0; i < ccount; ++i) { + // TODO: implement nulls first or last + ASTNode cl = (ASTNode) astChild.getChild(i); + if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { + order.append("+"); + cl = (ASTNode) cl.getChild(0).getChild(0); + } else if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEDESC) { + order.append("-"); + cl = (ASTNode) cl.getChild(0).getChild(0); + } else { + order.append("+"); + } + Map<ASTNode, ExprNodeDesc> nodeOutputs = ExprNodeTypeCheck.genExprNode(cl, tcCtx); + ExprNodeDesc desc = nodeOutputs.get(cl); + if (!(desc instanceof ExprNodeColumnDesc)) { + throw new SemanticException("Only partition keys are allowed for " + + "sorting partition names, input: " + cl.toStringTree()); + } + String col = ((ExprNodeColumnDesc) desc).getColumn().toLowerCase(); + colIndices.append(poses.get(col)).append(","); + } + colIndices.setLength(colIndices.length() - 1); + orderBy = colIndices + ":" + order; + } + } + return orderBy; + } + + private short getShowPartitionsLimit(ASTNode command) { + short limit = -1; + for (int childIndex = 0; childIndex < command.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode) command.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_LIMIT) { + limit = Short.valueOf((astChild.getChild(0)).getText()); + } + } + return limit; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java index eeef253af8..9d6ebcc6e1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * DDL task description for SHOW PARTITIONS commands.
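getShowPartitionsOrder above flattens ORDER BY into a compact string that is handed to the metastore: the zero-based indexes of the referenced partition keys joined by commas, a colon, then one '+' or '-' direction flag per key. A small decoding sketch under that convention (plain Java with illustrative names; with partition keys (ds, hr, rs), ORDER BY rs DESC, hr is encoded as "2,1:-+", the same string the new EXPLAIN output later in this patch renders as order: 2,1:-+):

```java
public class OrderStringSketch {
  public static void main(String[] args) {
    String[] partKeys = {"ds", "hr", "rs"};     // partition keys in schema order
    String orderBy = "2,1:-+";                  // encoding of: ORDER BY rs DESC, hr
    String[] halves = orderBy.split(":");
    String[] keyIndexes = halves[0].split(","); // {"2", "1"} -> rs, hr
    for (int i = 0; i < keyIndexes.length; i++) {
      char direction = halves[1].charAt(i);     // '-' = DESC, '+' = ASC
      System.out.println(partKeys[Integer.parseInt(keyIndexes[i])]
          + (direction == '-' ? " DESC" : " ASC"));
    }
  }
}
```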
@@ -38,11 +39,18 @@ private final String tabName; private final String resFile; private final Map<String, String> partSpec; + private final short limit; + private final String order; + private final ExprNodeDesc cond; - public ShowPartitionsDesc(String tabName, Path resFile, Map<String, String> partSpec) { + public ShowPartitionsDesc(String tabName, Path resFile, Map<String, String> partSpec, + ExprNodeDesc condition, String order, short limit) { this.tabName = tabName; this.resFile = resFile.toString(); this.partSpec = partSpec; + this.cond = condition; + this.order = order; + this.limit = limit; } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -59,4 +67,27 @@ public String getTabName() { public String getResFile() { return resFile; } + + public short getLimit() { + return limit; + } + + public ExprNodeDesc getCond() { + return cond; + } + + @Explain(displayName = "limit", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public int getLimitExplain() { + return limit; + } + + @Explain(displayName = "order", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getOrder() { + return order; + } + + @Explain(displayName = "condition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getFilterStr() { + return cond.getExprString(); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java index 2b1a002748..ba20861395 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java @@ -19,15 +19,29 @@ package org.apache.hadoop.hive.ql.ddl.table.partition.show; import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; /** * Operation process of showing the partitions of a table.
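When a PARTITION spec is combined with WHERE or ORDER BY, the operation below rewrites each spec entry into the same ExprNodeDesc form as the filter: an equality predicate over the partition column, AND-ed onto the WHERE condition via ExprNodeDescUtils.mergePredicates before a single metastore call. A hedged sketch of that building block (the class name is invented; the ExprNode constructors and the GenericUDFOPEqual usage are taken from the hunk that follows):

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartSpecPredicateSketch {
  public static void main(String[] args) throws Exception {
    // PARTITION (rs = 'AS') becomes the predicate (rs = 'AS'), ready to be
    // merged with a WHERE filter via ExprNodeDescUtils.mergePredicates(...)
    List<ExprNodeDesc> children = new ArrayList<>();
    children.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rs", null, true));
    children.add(new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "AS"));
    ExprNodeDesc eq = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPEqual(), children);
    System.out.println(eq.getExprString()); // expected: (rs = 'AS'), as rendered in EXPLAIN
  }
}
```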
@@ -44,11 +58,14 @@ public int execute() throws HiveException { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, desc.getTabName()); } - List<String> parts = null; - if (desc.getPartSpec() != null) { - parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getPartSpec(), (short) -1); + List<String> parts; + if (desc.getCond() != null || desc.getOrder() != null) { + parts = getPartitionNames(tbl); + } else if (desc.getPartSpec() != null) { + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), + desc.getPartSpec(), desc.getLimit()); } else { - parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1); + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getLimit()); } // write the results in the file @@ -60,4 +77,42 @@ return 0; } + + // Get partition names when an order or filter is specified. + private List<String> getPartitionNames(Table tbl) throws HiveException { + List<String> partNames; + ExprNodeDesc predicate = desc.getCond(); + if (desc.getPartSpec() != null) { + List<FieldSchema> fieldSchemas = tbl.getPartitionKeys(); + Map<String, String> colTypes = new HashMap<String, String>(); + for (FieldSchema fs : fieldSchemas) { + colTypes.put(fs.getName().toLowerCase(), fs.getType()); + } + for (Map.Entry<String, String> entry : desc.getPartSpec().entrySet()) { + String type = colTypes.get(entry.getKey().toLowerCase()); + PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type); + Object val = entry.getValue(); + if (!pti.equals(TypeInfoFactory.stringTypeInfo)) { + Object converted = ObjectInspectorConverters.getConverter( + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo), + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti)) + .convert(val); + if (converted == null) { + throw new HiveException("Cannot convert to " + type + " from string, value: " + val); + } + val = converted; + } + List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(); + children.add(new ExprNodeColumnDesc(pti, entry.getKey().toLowerCase(), null, true)); + children.add(new ExprNodeConstantDesc(pti, val)); + ExprNodeDesc exprNodeDesc = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPEqual(), children); + predicate = (predicate == null) ?
exprNodeDesc : + ExprNodeDescUtils.mergePredicates(exprNodeDesc, predicate); + } + } + + partNames = context.getDb().getPartitionNames(tbl, (ExprNodeGenericFuncDesc) predicate, + desc.getOrder(), desc.getLimit()); + return partNames; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 1f9fb3b897..df1c26eb2e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -3600,6 +3600,27 @@ public boolean dropPartition(String dbName, String tableName, List<String> parti return names; } + public List<String> getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, String order, + short maxParts) throws HiveException { + List<String> names = null; + // exprBytes must not be null, per the thrift definition + byte[] exprBytes = {(byte)-1}; + if (expr != null) { + exprBytes = SerializationUtilities.serializeExpressionToKryo(expr); + } + try { + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + names = getMSC().listPartitionNames(tbl.getCatalogName(), tbl.getDbName(), + tbl.getTableName(), defaultPartitionName, exprBytes, order, maxParts); + } catch (NoSuchObjectException nsoe) { + return Lists.newArrayList(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + return names; + } + /** * get all the partitions that the table has * diff --git ql/src/test/org/apache/hadoop/hive/ql/ddl/table/partition/show/TestShowPartitionAnalyzer.java ql/src/test/org/apache/hadoop/hive/ql/ddl/table/partition/show/TestShowPartitionAnalyzer.java new file mode 100644 index 0000000000..6e07d5d3af --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/ddl/table/partition/show/TestShowPartitionAnalyzer.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.partition.show; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.type.ExprNodeTypeCheck; +import org.apache.hadoop.hive.ql.parse.type.TypeCheckCtx; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public class TestShowPartitionAnalyzer { + + @Test + public void testGetShowPartitionsFilter() throws Exception { + + List<FieldSchema> partColumns = new ArrayList<FieldSchema>(); + partColumns.add(new FieldSchema("ds", TypeInfoFactory.dateTypeInfo.getTypeName(), null)); + partColumns.add(new FieldSchema("hr", TypeInfoFactory.intTypeInfo.getTypeName(), null)); + partColumns.add(new FieldSchema("rs", TypeInfoFactory.stringTypeInfo.getTypeName(), null)); + RowResolver rwsch = new RowResolver(); + rwsch.put("tableBar", "ds", new ColumnInfo("ds", + TypeInfoFactory.dateTypeInfo, null, true)); + rwsch.put("tableBar", "hr", new ColumnInfo("hr", + TypeInfoFactory.intTypeInfo, null, true)); + rwsch.put("tableBar", "rs", new ColumnInfo("rs", + TypeInfoFactory.stringTypeInfo, null, true)); + TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch); + // Numeric columns compared with the default partition name + String showPart1 = "show partitions databaseFoo.tableBar " + + "where ds > '2010-03-03' and hr = '__HIVE_DEFAULT_PARTITION__' and " + + "rs <= 421021"; + ASTNode command = ParseUtils.parse(showPart1); + ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc)genExprNodeByDefault(tcCtx, command); + // hr = '__HIVE_DEFAULT_PARTITION__' is constant-folded to a null boolean constant + Assert.assertEquals(new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, + null).getExprString(), funcDesc.getChildren().get(1).getExprString()); + // the types of the column ds and the constant '2010-03-03' may still mismatch at this stage + ExprNodeGenericFuncDesc child = (ExprNodeGenericFuncDesc)funcDesc.getChildren().get(0); + Assert.assertEquals("ds", ((ExprNodeColumnDesc)child.getChildren().get(0)).getColumn()); + Assert.assertEquals("2010-03-03", ((ExprNodeConstantDesc)child.getChildren().get(1)).getValue()); + + // rs <= 421021 + child = (ExprNodeGenericFuncDesc)funcDesc.getChildren().get(2); + Assert.assertEquals("rs", ((ExprNodeColumnDesc)child.getChildren().get(0)).getColumn()); + Assert.assertEquals(421021, ((ExprNodeConstantDesc)child.getChildren().get(1)).getValue()); + + Table table = new Table(new org.apache.hadoop.hive.metastore.api.Table("databaseFoo", "tableBar", + "foo", 1, 1, -1, null, partColumns, null, + null, null,
TableType.MANAGED_TABLE.name())); + ShowPartitionAnalyzer analyzer = new ShowPartitionAnalyzer(QueryState.getNewQueryState( + new HiveConf(), null)); + funcDesc = (ExprNodeGenericFuncDesc)analyzer.getShowPartitionsFilter(table, command); + Assert.assertTrue(funcDesc.getChildren().size() == 3); + // ds > '2010-03-03' + child = (ExprNodeGenericFuncDesc)funcDesc.getChildren().get(0); + Assert.assertEquals("ds", ((ExprNodeColumnDesc)child.getChildren().get(0)).getColumn()); + Assert.assertEquals(TypeInfoFactory.dateTypeInfo, child.getChildren().get(0).getTypeInfo()); + Assert.assertEquals(child.getChildren().get(0).getTypeString(), + child.getChildren().get(1).getTypeString()); + // rs <= 421021 + child = (ExprNodeGenericFuncDesc)funcDesc.getChildren().get(2); + Assert.assertEquals("rs", ((ExprNodeColumnDesc)child.getChildren().get(0)).getColumn()); + Assert.assertEquals(TypeInfoFactory.stringTypeInfo, child.getChildren().get(0).getTypeInfo()); + Assert.assertEquals(child.getChildren().get(0).getTypeString(), + child.getChildren().get(1).getTypeString()); + + // hr = '__HIVE_DEFAULT_PARTITION__' + child = (ExprNodeGenericFuncDesc)funcDesc.getChildren().get(1); + Assert.assertTrue(child.getChildren().size() == 1); + Assert.assertEquals("hr", ((ExprNodeColumnDesc)child.getChildren().get(0)).getColumn()); + Assert.assertEquals(child.getGenericUDF().getUdfName(), + FunctionRegistry.getFunctionInfo("isnull").getGenericUDF().getUdfName()); + + // invalid input + String showPart2 = "show partitions databaseFoo.tableBar " + + "where hr > 'a123' and hr <= '2346b'"; + command = ParseUtils.parse(showPart2); + try { + analyzer.getShowPartitionsFilter(table, command); + Assert.fail("should throw a semantic exception"); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains("Cannot convert to int from string")); + } + + funcDesc = (ExprNodeGenericFuncDesc)genExprNodeByDefault(tcCtx, command); + List<String> partColumnNames = new ArrayList<>(); + List<PrimitiveTypeInfo> partColumnTypeInfos = new ArrayList<>(); + for (FieldSchema fs : partColumns) { + partColumnNames.add(fs.getName()); + partColumnTypeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType())); + } + + List<String> partNames = new LinkedList<String>(); + partNames.add("ds=2010-11-10/hr=12/rs=NA"); + partNames.add("ds=2010-11-10/hr=13/rs=AS"); + partNames.add("ds=2010-11-10/hr=23/rs=AE"); + // The metastore uses this to filter partition names by default + PartitionPruner.prunePartitionNames( + partColumnNames, partColumnTypeInfos, funcDesc, "__HIVE_DEFAULT_PARTITION__", partNames); + // hr > 'a123' and hr <= '2346b' filters nothing out + Assert.assertTrue(partNames.contains("ds=2010-11-10/hr=12/rs=NA")); + Assert.assertTrue(partNames.contains("ds=2010-11-10/hr=13/rs=AS")); + Assert.assertTrue(partNames.contains("ds=2010-11-10/hr=23/rs=AE")); + } + + private ExprNodeDesc genExprNodeByDefault(TypeCheckCtx tcCtx, ASTNode command) throws Exception { + for (int childIndex = 0; childIndex < command.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode) command.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_WHERE) { + ASTNode conds = (ASTNode) astChild.getChild(0); + Map<ASTNode, ExprNodeDesc> nodeOutputs = ExprNodeTypeCheck.genExprNode(conds, tcCtx); + return nodeOutputs.get(conds); + } + } + return null; + } +} diff --git ql/src/test/queries/clientpositive/show_partitions2.q ql/src/test/queries/clientpositive/show_partitions2.q new file mode 100644 index 0000000000..af6fc8d91f --- /dev/null +++ ql/src/test/queries/clientpositive/show_partitions2.q @@ -0,0 +1,43 @@ +CREATE TABLE
mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hr INT, rs STRING); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=17, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=19, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=22, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=12, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=10, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=15, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=16, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=20, rs='AF'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=21, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=16, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=22, rs='AS'); + +CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hr INT, rs STRING); +INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA'); + +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +INSERT INTO TABLE mpart1 PARTITION(ds, hr, rs) SELECT * FROM srcpart1; + +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4; +SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20; +SHOW PARTITIONS mpart1 WHERE hr > 9 AND hr < 19 ORDER BY hr DESC, ds; + +SHOW PARTITIONS mpart1 ORDER BY hr ASC, ds DESC; +SHOW PARTITIONS mpart1 PARTITION (rs='AS') ORDER BY ds DESC; + +SHOW PARTITIONS mpart1 LIMIT 3; +SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3; + +SHOW PARTITIONS mpart1 where ds = '__HIVE_DEFAULT_PARTITION__' AND hr = '__HIVE_DEFAULT_PARTITION__'; +SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA'; +SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC; +SHOW PARTITIONS mpart1 where ds = '1980-11-10' AND hr != '__HIVE_DEFAULT_PARTITION__' ORDER BY hr DESC; + +EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' AND hr >= 20; +EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4; +EXPLAIN SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20; +EXPLAIN SHOW PARTITIONS mpart1 ORDER BY hr DESC, ds DESC; +EXPLAIN SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3; +EXPLAIN SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA'; +EXPLAIN SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC; diff --git ql/src/test/results/clientpositive/llap/drop_partitions_filter.q.out ql/src/test/results/clientpositive/llap/drop_partitions_filter.q.out index edfbbf7127..5e7da876e1 100644 --- ql/src/test/results/clientpositive/llap/drop_partitions_filter.q.out +++ ql/src/test/results/clientpositive/llap/drop_partitions_filter.q.out @@ -155,6 +155,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: ptestfilter_n1 Stage: Stage-1 diff --git ql/src/test/results/clientpositive/llap/show_partitions.q.out ql/src/test/results/clientpositive/llap/show_partitions.q.out index 0a73374987..feabfcf366 100644 --- ql/src/test/results/clientpositive/llap/show_partitions.q.out +++ 
ql/src/test/results/clientpositive/llap/show_partitions.q.out @@ -128,6 +128,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 partSpec: hr 11 table: default.srcpart @@ -159,6 +160,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 partSpec: ds 2008-04-08 hr 12 diff --git ql/src/test/results/clientpositive/llap/show_partitions2.q.out ql/src/test/results/clientpositive/llap/show_partitions2.q.out new file mode 100644 index 0000000000..46a9ee35cd --- /dev/null +++ ql/src/test/results/clientpositive/llap/show_partitions2.q.out @@ -0,0 +1,417 @@ +PREHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hr INT, rs STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart1 +POSTHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds DATE, hr INT, rs STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart1 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=17, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=17, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hr=17/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=19, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=19, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hr=19/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hr=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hr=22/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=12, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=12, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=12/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=10, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=10, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=10/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=15, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=15, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=15/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: 
Output: default@mpart1@ds=1980-11-10/hr=16/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=20, rs='AF') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=20, rs='AF') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=20/rs=AF +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=21, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hr=21, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=21/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-11/hr=16/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-11', hr=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-11/hr=22/rs=AS +PREHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hr INT, rs STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart1 +POSTHOOK: query: CREATE TABLE srcpart1 (key1 INT, value1 STRING, ds DATE, hr INT, rs STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart1 +PREHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@srcpart1 +POSTHOOK: query: INSERT INTO TABLE srcpart1 VALUES (1, 'val1', 'null', 'null', 'AS'), (2, 'val2', '1980-11-11', '12', 'AS'), + (3, 'val3', '1980-11-10', '21', 'NA'), (4, 'val4', '1980-11-11', 'null', 'NA'), (5, 'val5', '1980-11-10', 'null', 'NA') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@srcpart1 +POSTHOOK: Lineage: srcpart1.ds SCRIPT [] +POSTHOOK: Lineage: srcpart1.hr SCRIPT [] +POSTHOOK: Lineage: srcpart1.key1 SCRIPT [] +POSTHOOK: Lineage: srcpart1.rs SCRIPT [] +POSTHOOK: Lineage: srcpart1.value1 SCRIPT [] +PREHOOK: query: INSERT INTO TABLE mpart1 PARTITION(ds, hr, rs) SELECT * FROM srcpart1 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart1 +PREHOOK: Output: default@mpart1 +POSTHOOK: query: INSERT INTO TABLE mpart1 PARTITION(ds, hr, rs) SELECT * FROM srcpart1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=21/rs=NA +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart1@ds=1980-11-11/hr=12/rs=AS +POSTHOOK: Output: default@mpart1@ds=1980-11-11/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +POSTHOOK: Output: default@mpart1@ds=__HIVE_DEFAULT_PARTITION__/hr=__HIVE_DEFAULT_PARTITION__/rs=AS +POSTHOOK: 
Lineage: mpart1 PARTITION(ds=1980-11-10,hr=21,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-10,hr=21,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-10,hr=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-10,hr=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-11,hr=12,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-11,hr=12,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-11,hr=__HIVE_DEFAULT_PARTITION__,rs=NA).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=1980-11-11,hr=__HIVE_DEFAULT_PARTITION__,rs=NA).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hr=__HIVE_DEFAULT_PARTITION__,rs=AS).key1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:key1, type:int, comment:null), ] +POSTHOOK: Lineage: mpart1 PARTITION(ds=__HIVE_DEFAULT_PARTITION__,hr=__HIVE_DEFAULT_PARTITION__,rs=AS).value1 SIMPLE [(srcpart1)srcpart1.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=21/rs=NA +ds=1980-11-10/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hr=12/rs=EU +ds=1980-11-10/hr=15/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=21/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE hr > 9 AND hr < 19 ORDER BY hr DESC, ds +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE hr > 9 AND hr < 19 ORDER BY hr DESC, ds +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09/hr=17/rs=EU +ds=1980-11-10/hr=16/rs=AS +ds=1980-11-11/hr=16/rs=AS +ds=1980-11-10/hr=15/rs=EU +ds=1980-11-10/hr=12/rs=EU +ds=1980-11-11/hr=12/rs=AS +ds=1980-11-10/hr=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hr ASC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hr ASC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=10/rs=AS +ds=1980-11-11/hr=12/rs=AS +ds=1980-11-10/hr=12/rs=EU +ds=1980-11-10/hr=15/rs=EU +ds=1980-11-11/hr=16/rs=AS +ds=1980-11-10/hr=16/rs=AS +ds=1980-11-09/hr=17/rs=EU +ds=1980-11-09/hr=19/rs=AS +ds=1980-11-10/hr=20/rs=AF +ds=1980-11-10/hr=21/rs=NA +ds=1980-11-10/hr=21/rs=AS +ds=1980-11-11/hr=22/rs=AS 
+ds=1980-11-09/hr=22/rs=AS +ds=__HIVE_DEFAULT_PARTITION__/hr=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION (rs='AS') ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION (rs='AS') ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=__HIVE_DEFAULT_PARTITION__/hr=__HIVE_DEFAULT_PARTITION__/rs=AS +ds=1980-11-11/hr=12/rs=AS +ds=1980-11-11/hr=22/rs=AS +ds=1980-11-11/hr=16/rs=AS +ds=1980-11-10/hr=21/rs=AS +ds=1980-11-10/hr=16/rs=AS +ds=1980-11-10/hr=10/rs=AS +ds=1980-11-09/hr=22/rs=AS +ds=1980-11-09/hr=19/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09/hr=17/rs=EU +ds=1980-11-09/hr=19/rs=AS +ds=1980-11-09/hr=22/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=12/rs=EU +ds=1980-11-10/hr=10/rs=AS +ds=1980-11-10/hr=15/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 where ds = '__HIVE_DEFAULT_PARTITION__' AND hr = '__HIVE_DEFAULT_PARTITION__' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 where ds = '__HIVE_DEFAULT_PARTITION__' AND hr = '__HIVE_DEFAULT_PARTITION__' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=__HIVE_DEFAULT_PARTITION__/hr=__HIVE_DEFAULT_PARTITION__/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-11/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-11/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +ds=1980-11-10/hr=__HIVE_DEFAULT_PARTITION__/rs=NA +PREHOOK: query: SHOW PARTITIONS mpart1 where ds = '1980-11-10' AND hr != '__HIVE_DEFAULT_PARTITION__' ORDER BY hr DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 where ds = '1980-11-10' AND hr != '__HIVE_DEFAULT_PARTITION__' ORDER BY hr DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hr=21/rs=NA +ds=1980-11-10/hr=21/rs=AS +ds=1980-11-10/hr=20/rs=AF +ds=1980-11-10/hr=16/rs=AS +ds=1980-11-10/hr=15/rs=EU +ds=1980-11-10/hr=12/rs=EU +ds=1980-11-10/hr=10/rs=AS +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' AND hr >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: 
query: EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' AND hr >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds = DATE'1980-11-10') and (hr >= 20)) + limit: -1 + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' ORDER BY rs DESC, hr LIMIT 4 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: (ds = DATE'1980-11-10') + limit: 4 + order: 2,1:-+ + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 PARTITION (rs = 'AS') WHERE ds = '1980-11-10' AND hr >= 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds = DATE'1980-11-10') and (hr >= 20)) + limit: -1 + partSpec: + rs AS + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 ORDER BY hr DESC, ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 ORDER BY hr DESC, ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + limit: -1 + order: 1,0:-- + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 PARTITION(ds = '1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + limit: 3 + partSpec: + ds 1980-11-10 + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 where (ds = '__HIVE_DEFAULT_PARTITION__' OR hr = '__HIVE_DEFAULT_PARTITION__') AND rs = 'NA' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds is null or hr is null) and (rs = 'NA')) + limit: -1 + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: 
EXPLAIN SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: EXPLAIN SHOW PARTITIONS mpart1 where ds >= '1980-11-10' AND hr = '__HIVE_DEFAULT_PARTITION__' ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + condition: ((ds >= DATE'1980-11-10') and hr is null) + limit: -1 + order: 0:- + table: mpart1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git ql/src/test/results/clientpositive/llap/showparts.q.out ql/src/test/results/clientpositive/llap/showparts.q.out index 662b7a2fab..febd590e0c 100644 --- ql/src/test/results/clientpositive/llap/showparts.q.out +++ ql/src/test/results/clientpositive/llap/showparts.q.out @@ -13,6 +13,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: srcpart Stage: Stage-1 diff --git ql/src/test/results/clientpositive/llap/temp_table_drop_partitions_filter.q.out ql/src/test/results/clientpositive/llap/temp_table_drop_partitions_filter.q.out index ec4d537ced..c6bcf5f1d5 100644 --- ql/src/test/results/clientpositive/llap/temp_table_drop_partitions_filter.q.out +++ ql/src/test/results/clientpositive/llap/temp_table_drop_partitions_filter.q.out @@ -155,6 +155,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Partitions + limit: -1 table: ptestfilter_n1_temp Stage: Stage-1 diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 0e72625e01..c55da7377b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String defaultPartitionName; // optional private short maxParts; // optional private String catName; // optional + private String order; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), MAX_PARTS((short)5, "maxParts"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + ORDER((short)7, "order"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 6: // CAT_NAME return CAT_NAME; + case 7: // ORDER + return ORDER; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.ORDER}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -150,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetOrder()) { + this.order = other.order; + } } public PartitionsByExprRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.maxParts = (short)-1; this.catName = null; + this.order = null; } public String getDbName() { @@ -354,6 +365,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getOrder() { + return this.order; + } + + public void setOrder(String order) { + this.order = order; + } + + public void unsetOrder() { + this.order = null; + } + + /** Returns true if field order is set (has been assigned a value) and false otherwise */ + public boolean isSetOrder() { + return this.order != null; + } + + public void setOrderIsSet(boolean value) { + if (!value) { + this.order = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -404,6 +438,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ORDER: + if (value == null) { + unsetOrder(); + } else { + setOrder((String)value); + } + break; + } } @@ -427,6 +469,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case ORDER: + return getOrder(); + } throw new IllegalStateException(); } @@ -450,6 +495,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case ORDER: + return isSetOrder(); } throw new IllegalStateException(); } @@ -521,6 +568,15 @@ public boolean equals(PartitionsByExprRequest 
that) { return false; } + boolean this_present_order = true && this.isSetOrder(); + boolean that_present_order = true && that.isSetOrder(); + if (this_present_order || that_present_order) { + if (!(this_present_order && that_present_order)) + return false; + if (!this.order.equals(that.order)) + return false; + } + return true; } @@ -558,6 +614,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_order = true && (isSetOrder()); + list.add(present_order); + if (present_order) + list.add(order); + return list.hashCode(); } @@ -629,6 +690,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetOrder()).compareTo(other.isSetOrder()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOrder()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.order, other.order); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -698,6 +769,16 @@ public String toString() { } first = false; } + if (isSetOrder()) { + if (!first) sb.append(", "); + sb.append("order:"); + if (this.order == null) { + sb.append("null"); + } else { + sb.append(this.order); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -803,6 +884,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // ORDER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.order = iprot.readString(); + struct.setOrderIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -850,6 +939,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldEnd(); } } + if (struct.order != null) { + if (struct.isSetOrder()) { + oprot.writeFieldBegin(ORDER_FIELD_DESC); + oprot.writeString(struct.order); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -880,7 +976,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetOrder()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } @@ -890,6 +989,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetOrder()) { + oprot.writeString(struct.order); + } } @Override @@ -901,7 +1003,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -914,6 +1016,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(3)) { + struct.order = iprot.readString(); + struct.setOrderIsSet(true); + } } } diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 26cc9dd137..830dd78c3c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -206,6 +206,8 @@ public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List<String> get_partition_names_req(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -672,6 +674,8 @@ public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_names_req(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -3453,6 +3457,35 @@ public void send_get_partition_names_ps(String db_name, String tbl_name, List<String> get_partition_names_req(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_partition_names_req(req); + return recv_get_partition_names_req(); + } + + public void send_get_partition_names_req(PartitionsByExprRequest req) throws org.apache.thrift.TException + { + get_partition_names_req_args args = new get_partition_names_req_args(); + args.setReq(req); + sendBase("get_partition_names_req", args); + } + + public List<String> recv_get_partition_names_req() throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + get_partition_names_req_result result = new get_partition_names_req_result(); + receiveBase(result, "get_partition_names_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition_names_req failed: unknown result"); + } + public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); @@ -10536,6 +10569,38 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } + public void
get_partition_names_req(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_partition_names_req_call method_call = new get_partition_names_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_call extends org.apache.thrift.async.TAsyncMethodCall { + private PartitionsByExprRequest req; + public get_partition_names_req_call(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_partition_names_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_partition_names_req_args args = new get_partition_names_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public List<String> getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_partition_names_req(); + } + } + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); @@ -15588,6 +15653,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction<I, get_partition_names_req_args> { + public get_partition_names_req() { + super("get_partition_names_req"); + } + + public get_partition_names_req_args getEmptyArgsInstance() { + return new get_partition_names_req_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_partition_names_req_result getResult(I iface, get_partition_names_req_args args) throws org.apache.thrift.TException { + get_partition_names_req_result result = new get_partition_names_req_result(); + try { + result.success = iface.get_partition_names_req(args.req); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_partitions_by_filter_args> { public get_partitions_by_filter() { super("get_partitions_by_filter"); @@ -21743,6 +21835,7 @@ protected AsyncProcessor(I iface, Map extends
org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_filter() { - super("get_partitions_by_filter"); - } - - public get_partitions_by_filter_args getEmptyArgsInstance() { - return new get_partitions_by_filter_args(); - } - - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { - public get_part_specs_by_filter() { - super("get_part_specs_by_filter"); - } - - public get_part_specs_by_filter_args getEmptyArgsInstance() { - return new get_part_specs_by_filter_args(); - } - - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { - public get_partitions_by_expr() { - super("get_partitions_by_expr"); - } - - public get_partitions_by_expr_args getEmptyArgsInstance() { - return new get_partitions_by_expr_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(PartitionsByExprResult o) { - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partitions_by_expr(args.req,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_num_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction { - public get_num_partitions_by_filter() { - super("get_num_partitions_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req extends org.apache.thrift.AsyncProcessFunction> { + public get_partition_names_req() { + super("get_partition_names_req"); } - public get_num_partitions_by_filter_args getEmptyArgsInstance() { - return new get_num_partitions_by_filter_args(); + public get_partition_names_req_args getEmptyArgsInstance() { + return new get_partition_names_req_args(); } - public 
AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Integer o) { - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partition_names_req_result result = new get_partition_names_req_result(); result.success = o; - result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -27264,7 +27170,7 @@ public void onComplete(Integer o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + get_partition_names_req_result result = new get_partition_names_req_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -27295,25 +27201,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + public void start(I iface, get_partition_names_req_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partition_names_req(args.req,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_names() { - super("get_partitions_by_names"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_by_filter() { + super("get_partitions_by_filter"); } - public get_partitions_by_names_args getEmptyArgsInstance() { - return new get_partitions_by_names_args(); + public get_partitions_by_filter_args getEmptyArgsInstance() { + return new get_partitions_by_filter_args(); } public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback>() { public void onComplete(List o) { - get_partitions_by_names_result result = new get_partitions_by_names_result(); + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -27326,7 +27232,256 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_names_result result = new get_partitions_by_names_result(); + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = 
result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_part_specs_by_filter() { + super("get_part_specs_by_filter"); + } + + public get_part_specs_by_filter_args getEmptyArgsInstance() { + return new get_part_specs_by_filter_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { + public get_partitions_by_expr() { + super("get_partitions_by_expr"); + } + + public get_partitions_by_expr_args getEmptyArgsInstance() { + return new get_partitions_by_expr_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(PartitionsByExprResult o) { + 
get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partitions_by_expr(args.req,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_num_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction { + public get_num_partitions_by_filter() { + super("get_num_partitions_by_filter"); + } + + public get_num_partitions_by_filter_args getEmptyArgsInstance() { + return new get_num_partitions_by_filter_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Integer o) { + get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) 
throws TException { + iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_by_names() { + super("get_partitions_by_names"); + } + + public get_partitions_by_names_args getEmptyArgsInstance() { + return new get_partitions_by_names_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_by_names_result result = new get_partitions_by_names_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partitions_by_names_result result = new get_partitions_by_names_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -125046,31 +125201,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_names_req_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_filter_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_filter_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partition_names_req_argsStandardSchemeFactory()); + 
schemes.put(TupleScheme.class, new get_partition_names_req_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private String filter; // required - private short max_parts; // required + private PartitionsByExprRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -125085,14 +125231,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // FILTER - return FILTER; - case 4: // MAX_PARTS - return MAX_PARTS; + case 1: // REQ + return REQ; default: return null; } @@ -125133,194 +125273,73 @@ public String getFieldName() { } // isset id assignments - private static final int __MAX_PARTS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_req_args.class, metaDataMap); } - public get_partitions_by_filter_args() { - this.max_parts = (short)-1; - + public get_partition_names_req_args() { } - public get_partitions_by_filter_args( - String db_name, - String tbl_name, - String filter, - short max_parts) + public get_partition_names_req_args( + PartitionsByExprRequest req) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.filter = filter; - this.max_parts = max_parts; - setMax_partsIsSet(true); + this.req = req; } /** * Performs a deep copy on other. 
*/ - public get_partitions_by_filter_args(get_partitions_by_filter_args other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; - } - if (other.isSetFilter()) { - this.filter = other.filter; + public get_partition_names_req_args(get_partition_names_req_args other) { + if (other.isSetReq()) { + this.req = new PartitionsByExprRequest(other.req); } - this.max_parts = other.max_parts; } - public get_partitions_by_filter_args deepCopy() { - return new get_partitions_by_filter_args(this); + public get_partition_names_req_args deepCopy() { + return new get_partition_names_req_args(this); } @Override public void clear() { - this.db_name = null; - this.tbl_name = null; - this.filter = null; - this.max_parts = (short)-1; - - } - - public String getDb_name() { - return this.db_name; - } - - public void setDb_name(String db_name) { - this.db_name = db_name; - } - - public void unsetDb_name() { - this.db_name = null; - } - - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; - } - - public void setDb_nameIsSet(boolean value) { - if (!value) { - this.db_name = null; - } - } - - public String getTbl_name() { - return this.tbl_name; - } - - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; - } - - public void unsetTbl_name() { - this.tbl_name = null; - } - - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; - } - - public void setTbl_nameIsSet(boolean value) { - if (!value) { - this.tbl_name = null; - } + this.req = null; } - public String getFilter() { - return this.filter; + public PartitionsByExprRequest getReq() { + return this.req; } - public void setFilter(String filter) { - this.filter = filter; + public void setReq(PartitionsByExprRequest req) { + this.req = req; } - public void unsetFilter() { - this.filter = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field filter is set (has been assigned a value) and false otherwise */ - public boolean isSetFilter() { - return this.filter != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setFilterIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.filter = null; + this.req = null; } } - public short getMax_parts() { - return this.max_parts; - } - - public void setMax_parts(short max_parts) { - this.max_parts = max_parts; - setMax_partsIsSet(true); - } - - public void unsetMax_parts() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); - } - - /** Returns true if field max_parts is set (has been assigned a value) and false otherwise */ - public boolean isSetMax_parts() { - return EncodingUtils.testBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); - } - - public void setMax_partsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: - if (value == null) { - unsetDb_name(); - } else { - setDb_name((String)value); - } - break; - - case TBL_NAME: - if (value == null) { - unsetTbl_name(); - } else { - setTbl_name((String)value); - } - break; 
- - case FILTER: - if (value == null) { - unsetFilter(); - } else { - setFilter((String)value); - } - break; - - case MAX_PARTS: + case REQ: if (value == null) { - unsetMax_parts(); + unsetReq(); } else { - setMax_parts((Short)value); + setReq((PartitionsByExprRequest)value); } break; @@ -125329,17 +125348,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); - - case TBL_NAME: - return getTbl_name(); - - case FILTER: - return getFilter(); - - case MAX_PARTS: - return getMax_parts(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -125352,14 +125362,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case FILTER: - return isSetFilter(); - case MAX_PARTS: - return isSetMax_parts(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -125368,48 +125372,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_filter_args) - return this.equals((get_partitions_by_filter_args)that); + if (that instanceof get_partition_names_req_args) + return this.equals((get_partition_names_req_args)that); return false; } - public boolean equals(get_partitions_by_filter_args that) { + public boolean equals(get_partition_names_req_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) - return false; - if (!this.db_name.equals(that.db_name)) - return false; - } - - boolean this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) - return false; - if (!this.tbl_name.equals(that.tbl_name)) - return false; - } - - boolean this_present_filter = true && this.isSetFilter(); - boolean that_present_filter = true && that.isSetFilter(); - if (this_present_filter || that_present_filter) { - if (!(this_present_filter && that_present_filter)) - return false; - if (!this.filter.equals(that.filter)) - return false; - } - - boolean this_present_max_parts = true; - boolean that_present_max_parts = true; - if (this_present_max_parts || that_present_max_parts) { - if (!(this_present_max_parts && that_present_max_parts)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (this.max_parts != that.max_parts) + if (!this.req.equals(that.req)) return false; } @@ -125420,73 +125397,28 @@ public boolean equals(get_partitions_by_filter_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); - - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); - - boolean present_filter = true && (isSetFilter()); - list.add(present_filter); - if (present_filter) - list.add(filter); - - boolean present_max_parts = true; - 
list.add(present_max_parts); - if (present_max_parts) - list.add(max_parts); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(get_partitions_by_filter_args other) { + public int compareTo(get_partition_names_req_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFilter()).compareTo(other.isSetFilter()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFilter()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filter, other.filter); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetMax_parts()).compareTo(other.isSetMax_parts()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetMax_parts()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_parts, other.max_parts); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -125508,36 +125440,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_filter_args("); + StringBuilder sb = new StringBuilder("get_partition_names_req_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { - sb.append("null"); - } else { - sb.append(this.tbl_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("filter:"); - if (this.filter == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.filter); + sb.append(this.req); } first = false; - if (!first) sb.append(", "); - sb.append("max_parts:"); - sb.append(this.max_parts); - first = false; sb.append(")"); return sb.toString(); } @@ -125545,6 +125457,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -125557,23 +125472,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partitions_by_filter_argsStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_argsStandardScheme getScheme() { - return new get_partitions_by_filter_argsStandardScheme(); + private static class get_partition_names_req_argsStandardSchemeFactory implements SchemeFactory { + public get_partition_names_req_argsStandardScheme getScheme() { + return new get_partition_names_req_argsStandardScheme(); } } - private static class get_partitions_by_filter_argsStandardScheme extends StandardScheme { + private static class get_partition_names_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -125583,34 +125496,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f break; } switch (schemeField.id) { - case 1: // DB_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // FILTER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.filter = iprot.readString(); - 
struct.setFilterIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // MAX_PARTS - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.max_parts = iprot.readI16(); - struct.setMax_partsIsSet(true); + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -125624,100 +125514,58 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_names_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); - oprot.writeFieldEnd(); - } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - oprot.writeString(struct.tbl_name); - oprot.writeFieldEnd(); - } - if (struct.filter != null) { - oprot.writeFieldBegin(FILTER_FIELD_DESC); - oprot.writeString(struct.filter); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); - oprot.writeI16(struct.max_parts); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partitions_by_filter_argsTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_argsTupleScheme getScheme() { - return new get_partitions_by_filter_argsTupleScheme(); + private static class get_partition_names_req_argsTupleSchemeFactory implements SchemeFactory { + public get_partition_names_req_argsTupleScheme getScheme() { + return new get_partition_names_req_argsTupleScheme(); } } - private static class get_partitions_by_filter_argsTupleScheme extends TupleScheme { + private static class get_partition_names_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetReq()) { optionals.set(0); } - if (struct.isSetTbl_name()) { - optionals.set(1); - } - if (struct.isSetFilter()) { - optionals.set(2); - } - if (struct.isSetMax_parts()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); - } - if (struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); - } - if (struct.isSetFilter()) { - oprot.writeString(struct.filter); - } - if (struct.isSetMax_parts()) { - oprot.writeI16(struct.max_parts); + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partition_names_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } - if (incoming.get(2)) { - struct.filter = iprot.readString(); - struct.setFilterIsSet(true); - } - if (incoming.get(3)) { - struct.max_parts = iprot.readI16(); - struct.setMax_partsIsSet(true); + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_names_req_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -125725,11 +125573,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_filter_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_filter_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partition_names_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partition_names_req_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -125803,20 +125651,20 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", 
org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_req_result.class, metaDataMap); } - public get_partitions_by_filter_result() { + public get_partition_names_req_result() { } - public get_partitions_by_filter_result( - List success, + public get_partition_names_req_result( + List success, MetaException o1, NoSuchObjectException o2) { @@ -125829,12 +125677,9 @@ public get_partitions_by_filter_result( /** * Performs a deep copy on other. */ - public get_partitions_by_filter_result(get_partitions_by_filter_result other) { + public get_partition_names_req_result(get_partition_names_req_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (Partition other_element : other.success) { - __this__success.add(new Partition(other_element)); - } + List __this__success = new ArrayList(other.success); this.success = __this__success; } if (other.isSetO1()) { @@ -125845,8 +125690,8 @@ public get_partitions_by_filter_result(get_partitions_by_filter_result other) { } } - public get_partitions_by_filter_result deepCopy() { - return new get_partitions_by_filter_result(this); + public get_partition_names_req_result deepCopy() { + return new get_partition_names_req_result(this); } @Override @@ -125860,22 +125705,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? null : this.success.iterator(); } - public void addToSuccess(Partition elem) { + public void addToSuccess(String elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -125946,7 +125791,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -126005,12 +125850,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_filter_result) - return this.equals((get_partitions_by_filter_result)that); + if (that instanceof get_partition_names_req_result) + return this.equals((get_partition_names_req_result)that); return false; } - public boolean equals(get_partitions_by_filter_result that) { + public boolean equals(get_partition_names_req_result that) { if (that == null) return false; @@ -126067,7 +125912,7 @@ public int hashCode() { } @Override - public int compareTo(get_partitions_by_filter_result other) { + public int compareTo(get_partition_names_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -126121,7 +125966,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_filter_result("); + StringBuilder sb = new StringBuilder("get_partition_names_req_result("); boolean first = true; sb.append("success:"); @@ -126172,15 +126017,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_partitions_by_filter_resultStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_resultStandardScheme getScheme() { - return new get_partitions_by_filter_resultStandardScheme(); + private static class get_partition_names_req_resultStandardSchemeFactory implements SchemeFactory { + public get_partition_names_req_resultStandardScheme getScheme() { + return new get_partition_names_req_resultStandardScheme(); } } - private static class get_partitions_by_filter_resultStandardScheme extends StandardScheme { + private static class get_partition_names_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -126194,12 +126039,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); - struct.success = new ArrayList(_list1598.size); - Partition _elem1599; + struct.success = new ArrayList(_list1598.size); + String _elem1599; for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) { - _elem1599 = new Partition(); - _elem1599.read(iprot); + _elem1599 = iprot.readString(); struct.success.add(_elem1599); } iprot.readListEnd(); @@ -126236,17 +126080,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_names_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1601 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1601 : struct.success) { - _iter1601.write(oprot); + oprot.writeString(_iter1601); } oprot.writeListEnd(); } @@ -126268,16 +126112,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ } - private static class get_partitions_by_filter_resultTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_filter_resultTupleScheme getScheme() { - return new get_partitions_by_filter_resultTupleScheme(); + private static class get_partition_names_req_resultTupleSchemeFactory implements SchemeFactory { + public get_partition_names_req_resultTupleScheme getScheme() { + return new get_partition_names_req_resultTupleScheme(); } } - private static class get_partitions_by_filter_resultTupleScheme 
extends TupleScheme { + private static class get_partition_names_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -126293,9 +126137,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1602 : struct.success) + for (String _iter1602 : struct.success) { - _iter1602.write(oprot); + oprot.writeString(_iter1602); } } } @@ -126308,18 +126152,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1603.size); - Partition _elem1604; + org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1603.size); + String _elem1604; for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) { - _elem1604 = new Partition(); - _elem1604.read(iprot); + _elem1604 = iprot.readString(); struct.success.add(_elem1604); } } @@ -126340,24 +126183,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", 
org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_part_specs_by_filter_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_part_specs_by_filter_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_by_filter_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_filter_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required private String filter; // required - private int max_parts; // required + private short max_parts; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -126439,21 +126282,21 @@ public String getFieldName() { tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); } - public get_part_specs_by_filter_args() { - this.max_parts = -1; + public get_partitions_by_filter_args() { + this.max_parts = (short)-1; } - public get_part_specs_by_filter_args( + public get_partitions_by_filter_args( String db_name, String tbl_name, String filter, - int max_parts) + short max_parts) { this(); this.db_name = db_name; @@ -126466,7 +126309,7 @@ public get_part_specs_by_filter_args( /** * Performs a deep copy on other. 
*/ - public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { + public get_partitions_by_filter_args(get_partitions_by_filter_args other) { __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; @@ -126480,8 +126323,8 @@ public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { this.max_parts = other.max_parts; } - public get_part_specs_by_filter_args deepCopy() { - return new get_part_specs_by_filter_args(this); + public get_partitions_by_filter_args deepCopy() { + return new get_partitions_by_filter_args(this); } @Override @@ -126489,7 +126332,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.filter = null; - this.max_parts = -1; + this.max_parts = (short)-1; } @@ -126562,11 +126405,11 @@ public void setFilterIsSet(boolean value) { } } - public int getMax_parts() { + public short getMax_parts() { return this.max_parts; } - public void setMax_parts(int max_parts) { + public void setMax_parts(short max_parts) { this.max_parts = max_parts; setMax_partsIsSet(true); } @@ -126614,7 +126457,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetMax_parts(); } else { - setMax_parts((Integer)value); + setMax_parts((Short)value); } break; @@ -126662,12 +126505,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_part_specs_by_filter_args) - return this.equals((get_part_specs_by_filter_args)that); + if (that instanceof get_partitions_by_filter_args) + return this.equals((get_partitions_by_filter_args)that); return false; } - public boolean equals(get_part_specs_by_filter_args that) { + public boolean equals(get_partitions_by_filter_args that) { if (that == null) return false; @@ -126738,7 +126581,7 @@ public int hashCode() { } @Override - public int compareTo(get_part_specs_by_filter_args other) { + public int compareTo(get_partitions_by_filter_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -126802,7 +126645,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_part_specs_by_filter_args("); + StringBuilder sb = new StringBuilder("get_partitions_by_filter_args("); boolean first = true; sb.append("db_name:"); @@ -126859,15 +126702,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_part_specs_by_filter_argsStandardSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_argsStandardScheme getScheme() { - return new get_part_specs_by_filter_argsStandardScheme(); + private static class get_partitions_by_filter_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_argsStandardScheme getScheme() { + return new get_partitions_by_filter_argsStandardScheme(); } } - private static class get_part_specs_by_filter_argsStandardScheme extends StandardScheme { + private static class get_partitions_by_filter_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -126902,8 +126745,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f } break; case 4: // MAX_PARTS - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.max_parts = iprot.readI32(); + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -126918,7 +126761,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -126938,7 +126781,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldEnd(); } oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); - oprot.writeI32(struct.max_parts); + oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -126946,16 +126789,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ } - private static class get_part_specs_by_filter_argsTupleSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_argsTupleScheme getScheme() { - return new get_part_specs_by_filter_argsTupleScheme(); + private static class get_partitions_by_filter_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_argsTupleScheme getScheme() { + return new get_partitions_by_filter_argsTupleScheme(); } } - private static class get_part_specs_by_filter_argsTupleScheme extends TupleScheme { + private static class get_partitions_by_filter_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -126981,12 +126824,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f oprot.writeString(struct.filter); } if (struct.isSetMax_parts()) { - oprot.writeI32(struct.max_parts); + oprot.writeI16(struct.max_parts); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { @@ -127002,7 +126845,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi struct.setFilterIsSet(true); } if (incoming.get(3)) { - struct.max_parts = iprot.readI32(); + struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } } @@ -127010,8 +126853,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_filter_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -127019,11 +126862,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_part_specs_by_filter_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_part_specs_by_filter_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_by_filter_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_filter_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -127097,20 +126940,20 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpec.class)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap); } - public get_part_specs_by_filter_result() { + public get_partitions_by_filter_result() { } - public get_part_specs_by_filter_result( - List success, + public get_partitions_by_filter_result( + List success, MetaException o1, NoSuchObjectException o2) { @@ -127123,11 +126966,11 @@ public get_part_specs_by_filter_result( /** * Performs a deep copy on other. */ - public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { + public get_partitions_by_filter_result(get_partitions_by_filter_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (PartitionSpec other_element : other.success) { - __this__success.add(new PartitionSpec(other_element)); + List __this__success = new ArrayList(other.success.size()); + for (Partition other_element : other.success) { + __this__success.add(new Partition(other_element)); } this.success = __this__success; } @@ -127139,8 +126982,8 @@ public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { } } - public get_part_specs_by_filter_result deepCopy() { - return new get_part_specs_by_filter_result(this); + public get_partitions_by_filter_result deepCopy() { + return new get_partitions_by_filter_result(this); } @Override @@ -127154,22 +126997,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? 
null : this.success.iterator(); } - public void addToSuccess(PartitionSpec elem) { + public void addToSuccess(Partition elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -127240,7 +127083,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -127299,12 +127142,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_part_specs_by_filter_result) - return this.equals((get_part_specs_by_filter_result)that); + if (that instanceof get_partitions_by_filter_result) + return this.equals((get_partitions_by_filter_result)that); return false; } - public boolean equals(get_part_specs_by_filter_result that) { + public boolean equals(get_partitions_by_filter_result that) { if (that == null) return false; @@ -127361,7 +127204,7 @@ public int hashCode() { } @Override - public int compareTo(get_part_specs_by_filter_result other) { + public int compareTo(get_partitions_by_filter_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -127415,7 +127258,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_part_specs_by_filter_result("); + StringBuilder sb = new StringBuilder("get_partitions_by_filter_result("); boolean first = true; sb.append("success:"); @@ -127466,15 +127309,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_part_specs_by_filter_resultStandardSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_resultStandardScheme getScheme() { - return new get_part_specs_by_filter_resultStandardScheme(); + private static class get_partitions_by_filter_resultStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_resultStandardScheme getScheme() { + return new get_partitions_by_filter_resultStandardScheme(); } } - private static class get_part_specs_by_filter_resultStandardScheme extends StandardScheme { + private static class get_partitions_by_filter_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -127488,11 +127331,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); - struct.success = new ArrayList(_list1606.size); - PartitionSpec _elem1607; + struct.success = new ArrayList(_list1606.size); + Partition _elem1607; for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) { - _elem1607 = new PartitionSpec(); + _elem1607 = new Partition(); _elem1607.read(iprot); struct.success.add(_elem1607); } @@ -127530,7 
+127373,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -127538,7 +127381,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1609 : struct.success) + for (Partition _iter1609 : struct.success) { _iter1609.write(oprot); } @@ -127562,16 +127405,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ } - private static class get_part_specs_by_filter_resultTupleSchemeFactory implements SchemeFactory { - public get_part_specs_by_filter_resultTupleScheme getScheme() { - return new get_part_specs_by_filter_resultTupleScheme(); + private static class get_partitions_by_filter_resultTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_filter_resultTupleScheme getScheme() { + return new get_partitions_by_filter_resultTupleScheme(); } } - private static class get_part_specs_by_filter_resultTupleScheme extends TupleScheme { + private static class get_partitions_by_filter_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -127587,7 +127430,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1610 : struct.success) + for (Partition _iter1610 : struct.success) { _iter1610.write(oprot); } @@ -127602,17 +127445,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1611.size); - PartitionSpec _elem1612; + struct.success = new ArrayList(_list1611.size); + Partition _elem1612; for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1612 = new PartitionSpec(); + _elem1612 = new Partition(); _elem1612.read(iprot); struct.success.add(_elem1612); } @@ -127634,22 +127477,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_args 
implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_args"); - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_expr_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_expr_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_part_specs_by_filter_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_part_specs_by_filter_argsTupleSchemeFactory()); } - private PartitionsByExprRequest req; // required + private String db_name; // required + private String tbl_name; // required + private String filter; // required + private int max_parts; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + FILTER((short)3, "filter"), + MAX_PARTS((short)4, "max_parts"); private static final Map byName = new HashMap(); @@ -127664,8 +127516,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // REQ - return REQ; + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // FILTER + return FILTER; + case 4: // MAX_PARTS + return MAX_PARTS; default: return null; } @@ -127706,73 +127564,194 @@ public String getFieldName() { } // isset id assignments + private static final int __MAX_PARTS_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); } - public get_partitions_by_expr_args() { + public get_part_specs_by_filter_args() { + this.max_parts = -1; + } - public get_partitions_by_expr_args( - PartitionsByExprRequest req) + public get_part_specs_by_filter_args( + String db_name, + String tbl_name, + String filter, + int max_parts) { this(); - this.req = req; + this.db_name = db_name; + this.tbl_name = tbl_name; + this.filter = filter; + this.max_parts = max_parts; + setMax_partsIsSet(true); } /** * Performs a deep copy on other. 
*/ - public get_partitions_by_expr_args(get_partitions_by_expr_args other) { - if (other.isSetReq()) { - this.req = new PartitionsByExprRequest(other.req); + public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetFilter()) { + this.filter = other.filter; } + this.max_parts = other.max_parts; } - public get_partitions_by_expr_args deepCopy() { - return new get_partitions_by_expr_args(this); + public get_part_specs_by_filter_args deepCopy() { + return new get_part_specs_by_filter_args(this); } @Override public void clear() { - this.req = null; + this.db_name = null; + this.tbl_name = null; + this.filter = null; + this.max_parts = -1; + } - public PartitionsByExprRequest getReq() { - return this.req; + public String getDb_name() { + return this.db_name; } - public void setReq(PartitionsByExprRequest req) { - this.req = req; + public void setDb_name(String db_name) { + this.db_name = db_name; } - public void unsetReq() { - this.req = null; + public void unsetDb_name() { + this.db_name = null; } - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; } - public void setReqIsSet(boolean value) { + public void setDb_nameIsSet(boolean value) { if (!value) { - this.req = null; + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public String getFilter() { + return this.filter; + } + + public void setFilter(String filter) { + this.filter = filter; + } + + public void unsetFilter() { + this.filter = null; + } + + /** Returns true if field filter is set (has been assigned a value) and false otherwise */ + public boolean isSetFilter() { + return this.filter != null; + } + + public void setFilterIsSet(boolean value) { + if (!value) { + this.filter = null; } } + public int getMax_parts() { + return this.max_parts; + } + + public void setMax_parts(int max_parts) { + this.max_parts = max_parts; + setMax_partsIsSet(true); + } + + public void unsetMax_parts() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); + } + + /** Returns true if field max_parts is set (has been assigned a value) and false otherwise */ + public boolean isSetMax_parts() { + return EncodingUtils.testBit(__isset_bitfield, __MAX_PARTS_ISSET_ID); + } + + public void setMax_partsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { - case REQ: + case DB_NAME: if (value == null) { - unsetReq(); + unsetDb_name(); } else { - setReq((PartitionsByExprRequest)value); + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } 
else { + setTbl_name((String)value); + } + break; + + case FILTER: + if (value == null) { + unsetFilter(); + } else { + setFilter((String)value); + } + break; + + case MAX_PARTS: + if (value == null) { + unsetMax_parts(); + } else { + setMax_parts((Integer)value); } break; @@ -127781,8 +127760,17 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case REQ: - return getReq(); + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case FILTER: + return getFilter(); + + case MAX_PARTS: + return getMax_parts(); } throw new IllegalStateException(); @@ -127795,8 +127783,14 @@ public boolean isSet(_Fields field) { } switch (field) { - case REQ: - return isSetReq(); + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case FILTER: + return isSetFilter(); + case MAX_PARTS: + return isSetMax_parts(); } throw new IllegalStateException(); } @@ -127805,21 +127799,48 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_by_expr_args) - return this.equals((get_partitions_by_expr_args)that); + if (that instanceof get_part_specs_by_filter_args) + return this.equals((get_part_specs_by_filter_args)that); return false; } - public boolean equals(get_partitions_by_expr_args that) { + public boolean equals(get_part_specs_by_filter_args that) { if (that == null) return false; - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) return false; - if (!this.req.equals(that.req)) + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_filter = true && this.isSetFilter(); + boolean that_present_filter = true && that.isSetFilter(); + if (this_present_filter || that_present_filter) { + if (!(this_present_filter && that_present_filter)) + return false; + if (!this.filter.equals(that.filter)) + return false; + } + + boolean this_present_max_parts = true; + boolean that_present_max_parts = true; + if (this_present_max_parts || that_present_max_parts) { + if (!(this_present_max_parts && that_present_max_parts)) + return false; + if (this.max_parts != that.max_parts) return false; } @@ -127830,28 +127851,73 @@ public boolean equals(get_partitions_by_expr_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_filter = true && (isSetFilter()); + list.add(present_filter); + if 
(present_filter) + list.add(filter); + + boolean present_max_parts = true; + list.add(present_max_parts); + if (present_max_parts) + list.add(max_parts); return list.hashCode(); } @Override - public int compareTo(get_partitions_by_expr_args other) { + public int compareTo(get_part_specs_by_filter_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFilter()).compareTo(other.isSetFilter()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFilter()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filter, other.filter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMax_parts()).compareTo(other.isSetMax_parts()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMax_parts()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_parts, other.max_parts); if (lastComparison != 0) { return lastComparison; } @@ -127873,16 +127939,36 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_by_expr_args("); + StringBuilder sb = new StringBuilder("get_part_specs_by_filter_args("); boolean first = true; - sb.append("req:"); - if (this.req == null) { + sb.append("db_name:"); + if (this.db_name == null) { sb.append("null"); } else { - sb.append(this.req); + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("filter:"); + if (this.filter == null) { + sb.append("null"); + } else { + sb.append(this.filter); } first = false; + if (!first) sb.append(", "); + sb.append("max_parts:"); + sb.append(this.max_parts); + first = false; sb.append(")"); return sb.toString(); } @@ -127890,9 +127976,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (req != null) { - req.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -127905,21 +127988,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
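
That generated remark is the one piece of explanation in this stretch of churn, and the idiom behind it recurs in every struct here: Java deserialization bypasses constructors, so the byte that records which primitive fields were ever assigned is zeroed by hand on the assignment that follows. A self-contained sketch of that isset-bitfield pattern, as used for primitives like max_parts (plain Java; the class and member names are illustrative, not part of the patch):

    public class IssetBitfieldSketch {
      // One bit per primitive field; max_parts gets bit 0, mirroring __MAX_PARTS_ISSET_ID.
      private static final int MAX_PARTS_ISSET_ID = 0;
      private byte issetBitfield = 0;
      private short maxParts = -1;

      void setMaxParts(short v) {
        maxParts = v;
        issetBitfield |= (1 << MAX_PARTS_ISSET_ID);  // record that the field was assigned
      }

      boolean isSetMaxParts() {
        // Primitives cannot be null, so presence is tracked in the bitfield instead.
        return (issetBitfield & (1 << MAX_PARTS_ISSET_ID)) != 0;
      }

      void unsetMaxParts() {
        issetBitfield &= ~(1 << MAX_PARTS_ISSET_ID); // clear only this field's bit
      }
    }

This is why readObject must reset the bitfield before replaying the Thrift bytes: a constructor would have zeroed it, but Java serialization never calls one.
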
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partitions_by_expr_argsStandardSchemeFactory implements SchemeFactory { - public get_partitions_by_expr_argsStandardScheme getScheme() { - return new get_partitions_by_expr_argsStandardScheme(); + private static class get_part_specs_by_filter_argsStandardSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_argsStandardScheme getScheme() { + return new get_part_specs_by_filter_argsStandardScheme(); } } - private static class get_partitions_by_expr_argsStandardScheme extends StandardScheme { + private static class get_part_specs_by_filter_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -127929,11 +128014,34 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_e break; } switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new PartitionsByExprRequest(); - struct.req.read(iprot); - struct.setReqIsSet(true); + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // FILTER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.filter = iprot.readString(); + struct.setFilterIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MAX_PARTS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.max_parts = iprot.readI32(); + struct.setMax_partsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -127947,70 +128055,1099 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_e struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); oprot.writeFieldEnd(); } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.filter != null) { + oprot.writeFieldBegin(FILTER_FIELD_DESC); + oprot.writeString(struct.filter); + oprot.writeFieldEnd(); + } + 
oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); + oprot.writeI32(struct.max_parts); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partitions_by_expr_argsTupleSchemeFactory implements SchemeFactory { - public get_partitions_by_expr_argsTupleScheme getScheme() { - return new get_partitions_by_expr_argsTupleScheme(); + private static class get_part_specs_by_filter_argsTupleSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_argsTupleScheme getScheme() { + return new get_part_specs_by_filter_argsTupleScheme(); } } - private static class get_partitions_by_expr_argsTupleScheme extends TupleScheme { + private static class get_part_specs_by_filter_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetReq()) { + if (struct.isSetDb_name()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); + if (struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetFilter()) { + optionals.set(2); + } + if (struct.isSetMax_parts()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetFilter()) { + oprot.writeString(struct.filter); + } + if (struct.isSetMax_parts()) { + oprot.writeI32(struct.max_parts); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.req = new PartitionsByExprRequest(); - struct.req.read(iprot); - struct.setReqIsSet(true); + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.filter = iprot.readString(); + struct.setFilterIsSet(true); + } + if (incoming.get(3)) { + struct.max_parts = iprot.readI32(); + struct.setMax_partsIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_part_specs_by_filter_result"); - private static final 
org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_by_expr_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_by_expr_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_part_specs_by_filter_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_part_specs_by_filter_resultTupleSchemeFactory()); } - private PartitionsByExprResult success; // required + private List success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpec.class)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_result.class, metaDataMap); + } + + public get_part_specs_by_filter_result() { + } + + public get_part_specs_by_filter_result( + List success, + MetaException o1, + NoSuchObjectException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public get_part_specs_by_filter_result(get_part_specs_by_filter_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success.size()); + for (PartitionSpec other_element : other.success) { + __this__success.add(new PartitionSpec(other_element)); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchObjectException(other.o2); + } + } + + public get_part_specs_by_filter_result deepCopy() { + return new get_part_specs_by_filter_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(PartitionSpec elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { + return this.o2; + } + + public void setO2(NoSuchObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchObjectException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_part_specs_by_filter_result) + return this.equals((get_part_specs_by_filter_result)that); + return false; + } + + public boolean equals(get_part_specs_by_filter_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if 
(!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_part_specs_by_filter_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_part_specs_by_filter_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private 
static class get_part_specs_by_filter_resultStandardSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_resultStandardScheme getScheme() { + return new get_part_specs_by_filter_resultStandardScheme(); + } + } + + private static class get_part_specs_by_filter_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.success = new ArrayList(_list1614.size); + PartitionSpec _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + { + _elem1615 = new PartitionSpec(); + _elem1615.read(iprot); + struct.success.add(_elem1615); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (PartitionSpec _iter1617 : struct.success) + { + _iter1617.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_part_specs_by_filter_resultTupleSchemeFactory implements SchemeFactory { + public get_part_specs_by_filter_resultTupleScheme getScheme() { + return new get_part_specs_by_filter_resultTupleScheme(); + } + } + + private static class get_part_specs_by_filter_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + 
optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (PartitionSpec _iter1618 : struct.success) + { + _iter1618.write(oprot); + } + } + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1619.size); + PartitionSpec _elem1620; + for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + { + _elem1620 = new PartitionSpec(); + _elem1620.read(iprot); + struct.success.add(_elem1620); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partitions_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_expr_argsTupleSchemeFactory()); + } + + private PartitionsByExprRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_args.class, metaDataMap); + } + + public get_partitions_by_expr_args() { + } + + public get_partitions_by_expr_args( + PartitionsByExprRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. + */ + public get_partitions_by_expr_args(get_partitions_by_expr_args other) { + if (other.isSetReq()) { + this.req = new PartitionsByExprRequest(other.req); + } + } + + public get_partitions_by_expr_args deepCopy() { + return new get_partitions_by_expr_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public PartitionsByExprRequest getReq() { + return this.req; + } + + public void setReq(PartitionsByExprRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((PartitionsByExprRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partitions_by_expr_args) + return this.equals((get_partitions_by_expr_args)that); + return false; + } + + public boolean equals(get_partitions_by_expr_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override 
+ public int compareTo(get_partitions_by_expr_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partitions_by_expr_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_partitions_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_by_expr_argsStandardScheme getScheme() { + return new get_partitions_by_expr_argsStandardScheme(); + } + } + + private static class get_partitions_by_expr_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + 
} + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_partitions_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_by_expr_argsTupleScheme getScheme() { + return new get_partitions_by_expr_argsTupleScheme(); + } + } + + private static class get_partitions_by_expr_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new PartitionsByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partitions_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_by_expr_resultTupleSchemeFactory()); + } + + private PartitionsByExprResult success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -130198,13 +131335,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); - struct.names = new ArrayList(_list1614.size); - String _elem1615; - for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); + struct.names = new ArrayList(_list1622.size); + String _elem1623; + for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) { - _elem1615 = iprot.readString(); - struct.names.add(_elem1615); + _elem1623 = iprot.readString(); + struct.names.add(_elem1623); } iprot.readListEnd(); } @@ -130240,9 +131377,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1617 : struct.names) + for (String 
_iter1625 : struct.names) { - oprot.writeString(_iter1617); + oprot.writeString(_iter1625); } oprot.writeListEnd(); } @@ -130285,9 +131422,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1618 : struct.names) + for (String _iter1626 : struct.names) { - oprot.writeString(_iter1618); + oprot.writeString(_iter1626); } } } @@ -130307,13 +131444,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1619.size); - String _elem1620; - for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1627.size); + String _elem1628; + for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) { - _elem1620 = iprot.readString(); - struct.names.add(_elem1620); + _elem1628 = iprot.readString(); + struct.names.add(_elem1628); } } struct.setNamesIsSet(true); @@ -130800,14 +131937,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); - struct.success = new ArrayList(_list1622.size); - Partition _elem1623; - for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) + org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); + struct.success = new ArrayList(_list1630.size); + Partition _elem1631; + for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) { - _elem1623 = new Partition(); - _elem1623.read(iprot); - struct.success.add(_elem1623); + _elem1631 = new Partition(); + _elem1631.read(iprot); + struct.success.add(_elem1631); } iprot.readListEnd(); } @@ -130851,9 +131988,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1625 : struct.success) + for (Partition _iter1633 : struct.success) { - _iter1625.write(oprot); + _iter1633.write(oprot); } oprot.writeListEnd(); } @@ -130900,9 +132037,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1626 : struct.success) + for (Partition _iter1634 : struct.success) { - _iter1626.write(oprot); + _iter1634.write(oprot); } } } @@ -130920,14 +132057,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1627.size); - Partition _elem1628; - for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) + org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1635.size); + Partition _elem1636; + for (int _i1637 = 0; _i1637 < 
_list1635.size; ++_i1637) { - _elem1628 = new Partition(); - _elem1628.read(iprot); - struct.success.add(_elem1628); + _elem1636 = new Partition(); + _elem1636.read(iprot); + struct.success.add(_elem1636); } } struct.setSuccessIsSet(true); @@ -133415,14 +134552,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1630.size); - Partition _elem1631; - for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) + org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1638.size); + Partition _elem1639; + for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) { - _elem1631 = new Partition(); - _elem1631.read(iprot); - struct.new_parts.add(_elem1631); + _elem1639 = new Partition(); + _elem1639.read(iprot); + struct.new_parts.add(_elem1639); } iprot.readListEnd(); } @@ -133458,9 +134595,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1633 : struct.new_parts) + for (Partition _iter1641 : struct.new_parts) { - _iter1633.write(oprot); + _iter1641.write(oprot); } oprot.writeListEnd(); } @@ -133503,9 +134640,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1634 : struct.new_parts) + for (Partition _iter1642 : struct.new_parts) { - _iter1634.write(oprot); + _iter1642.write(oprot); } } } @@ -133525,14 +134662,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1635.size); - Partition _elem1636; - for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) + org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1643.size); + Partition _elem1644; + for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645) { - _elem1636 = new Partition(); - _elem1636.read(iprot); - struct.new_parts.add(_elem1636); + _elem1644 = new Partition(); + _elem1644.read(iprot); + struct.new_parts.add(_elem1644); } } struct.setNew_partsIsSet(true); @@ -134585,14 +135722,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1638.size); - Partition _elem1639; - for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) + org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1646.size); + Partition _elem1647; + for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648) { - _elem1639 = new Partition(); - _elem1639.read(iprot); - struct.new_parts.add(_elem1639); + _elem1647 = new Partition(); + _elem1647.read(iprot); + struct.new_parts.add(_elem1647); } 
iprot.readListEnd(); } @@ -134637,9 +135774,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1641 : struct.new_parts) + for (Partition _iter1649 : struct.new_parts) { - _iter1641.write(oprot); + _iter1649.write(oprot); } oprot.writeListEnd(); } @@ -134690,9 +135827,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1642 : struct.new_parts) + for (Partition _iter1650 : struct.new_parts) { - _iter1642.write(oprot); + _iter1650.write(oprot); } } } @@ -134715,14 +135852,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1643.size); - Partition _elem1644; - for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645) + org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1651.size); + Partition _elem1652; + for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653) { - _elem1644 = new Partition(); - _elem1644.read(iprot); - struct.new_parts.add(_elem1644); + _elem1652 = new Partition(); + _elem1652.read(iprot); + struct.new_parts.add(_elem1652); } } struct.setNew_partsIsSet(true); @@ -137861,13 +138998,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1646.size); - String _elem1647; - for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648) + org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1654.size); + String _elem1655; + for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656) { - _elem1647 = iprot.readString(); - struct.part_vals.add(_elem1647); + _elem1655 = iprot.readString(); + struct.part_vals.add(_elem1655); } iprot.readListEnd(); } @@ -137912,9 +139049,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1649 : struct.part_vals) + for (String _iter1657 : struct.part_vals) { - oprot.writeString(_iter1649); + oprot.writeString(_iter1657); } oprot.writeListEnd(); } @@ -137965,9 +139102,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1650 : struct.part_vals) + for (String _iter1658 : struct.part_vals) { - oprot.writeString(_iter1650); + oprot.writeString(_iter1658); } } } @@ -137990,13 +139127,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); - struct.part_vals = new ArrayList(_list1651.size); - String _elem1652; - for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653) + org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1659.size); + String _elem1660; + for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) { - _elem1652 = iprot.readString(); - struct.part_vals.add(_elem1652); + _elem1660 = iprot.readString(); + struct.part_vals.add(_elem1660); } } struct.setPart_valsIsSet(true); @@ -139808,13 +140945,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1654.size); - String _elem1655; - for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656) + org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1662.size); + String _elem1663; + for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) { - _elem1655 = iprot.readString(); - struct.part_vals.add(_elem1655); + _elem1663 = iprot.readString(); + struct.part_vals.add(_elem1663); } iprot.readListEnd(); } @@ -139848,9 +140985,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1657 : struct.part_vals) + for (String _iter1665 : struct.part_vals) { - oprot.writeString(_iter1657); + oprot.writeString(_iter1665); } oprot.writeListEnd(); } @@ -139887,9 +141024,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1658 : struct.part_vals) + for (String _iter1666 : struct.part_vals) { - oprot.writeString(_iter1658); + oprot.writeString(_iter1666); } } } @@ -139904,13 +141041,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1659.size); - String _elem1660; - for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) + org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1667.size); + String _elem1668; + for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) { - _elem1660 = iprot.readString(); - struct.part_vals.add(_elem1660); + _elem1668 = iprot.readString(); + struct.part_vals.add(_elem1668); } } struct.setPart_valsIsSet(true); @@ -142065,13 +143202,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); - struct.success = new ArrayList(_list1662.size); - String _elem1663; - for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) + org.apache.thrift.protocol.TList _list1670 = iprot.readListBegin(); + struct.success = new 
ArrayList(_list1670.size); + String _elem1671; + for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672) { - _elem1663 = iprot.readString(); - struct.success.add(_elem1663); + _elem1671 = iprot.readString(); + struct.success.add(_elem1671); } iprot.readListEnd(); } @@ -142106,9 +143243,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1665 : struct.success) + for (String _iter1673 : struct.success) { - oprot.writeString(_iter1665); + oprot.writeString(_iter1673); } oprot.writeListEnd(); } @@ -142147,9 +143284,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1666 : struct.success) + for (String _iter1674 : struct.success) { - oprot.writeString(_iter1666); + oprot.writeString(_iter1674); } } } @@ -142164,13 +143301,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1667.size); - String _elem1668; - for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) + org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1675.size); + String _elem1676; + for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677) { - _elem1668 = iprot.readString(); - struct.success.add(_elem1668); + _elem1676 = iprot.readString(); + struct.success.add(_elem1676); } } struct.setSuccessIsSet(true); @@ -142933,15 +144070,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1670 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1670.size); - String _key1671; - String _val1672; - for (int _i1673 = 0; _i1673 < _map1670.size; ++_i1673) + org.apache.thrift.protocol.TMap _map1678 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1678.size); + String _key1679; + String _val1680; + for (int _i1681 = 0; _i1681 < _map1678.size; ++_i1681) { - _key1671 = iprot.readString(); - _val1672 = iprot.readString(); - struct.success.put(_key1671, _val1672); + _key1679 = iprot.readString(); + _val1680 = iprot.readString(); + struct.success.put(_key1679, _val1680); } iprot.readMapEnd(); } @@ -142976,10 +144113,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1674 : struct.success.entrySet()) + for (Map.Entry _iter1682 : struct.success.entrySet()) { - oprot.writeString(_iter1674.getKey()); - oprot.writeString(_iter1674.getValue()); + oprot.writeString(_iter1682.getKey()); + oprot.writeString(_iter1682.getValue()); } oprot.writeMapEnd(); } @@ -143018,10 +144155,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) 
{ { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1675 : struct.success.entrySet()) + for (Map.Entry _iter1683 : struct.success.entrySet()) { - oprot.writeString(_iter1675.getKey()); - oprot.writeString(_iter1675.getValue()); + oprot.writeString(_iter1683.getKey()); + oprot.writeString(_iter1683.getValue()); } } } @@ -143036,15 +144173,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1676 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1676.size); - String _key1677; - String _val1678; - for (int _i1679 = 0; _i1679 < _map1676.size; ++_i1679) + org.apache.thrift.protocol.TMap _map1684 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1684.size); + String _key1685; + String _val1686; + for (int _i1687 = 0; _i1687 < _map1684.size; ++_i1687) { - _key1677 = iprot.readString(); - _val1678 = iprot.readString(); - struct.success.put(_key1677, _val1678); + _key1685 = iprot.readString(); + _val1686 = iprot.readString(); + struct.success.put(_key1685, _val1686); } } struct.setSuccessIsSet(true); @@ -143639,15 +144776,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1680 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1680.size); - String _key1681; - String _val1682; - for (int _i1683 = 0; _i1683 < _map1680.size; ++_i1683) + org.apache.thrift.protocol.TMap _map1688 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1688.size); + String _key1689; + String _val1690; + for (int _i1691 = 0; _i1691 < _map1688.size; ++_i1691) { - _key1681 = iprot.readString(); - _val1682 = iprot.readString(); - struct.part_vals.put(_key1681, _val1682); + _key1689 = iprot.readString(); + _val1690 = iprot.readString(); + struct.part_vals.put(_key1689, _val1690); } iprot.readMapEnd(); } @@ -143691,10 +144828,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1684 : struct.part_vals.entrySet()) + for (Map.Entry _iter1692 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1684.getKey()); - oprot.writeString(_iter1684.getValue()); + oprot.writeString(_iter1692.getKey()); + oprot.writeString(_iter1692.getValue()); } oprot.writeMapEnd(); } @@ -143745,10 +144882,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1685 : struct.part_vals.entrySet()) + for (Map.Entry _iter1693 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1685.getKey()); - oprot.writeString(_iter1685.getValue()); + oprot.writeString(_iter1693.getKey()); + oprot.writeString(_iter1693.getValue()); } } } @@ -143771,15 +144908,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap 
_map1686 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1686.size); - String _key1687; - String _val1688; - for (int _i1689 = 0; _i1689 < _map1686.size; ++_i1689) + org.apache.thrift.protocol.TMap _map1694 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1694.size); + String _key1695; + String _val1696; + for (int _i1697 = 0; _i1697 < _map1694.size; ++_i1697) { - _key1687 = iprot.readString(); - _val1688 = iprot.readString(); - struct.part_vals.put(_key1687, _val1688); + _key1695 = iprot.readString(); + _val1696 = iprot.readString(); + struct.part_vals.put(_key1695, _val1696); } } struct.setPart_valsIsSet(true); @@ -145263,15 +146400,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1690 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1690.size); - String _key1691; - String _val1692; - for (int _i1693 = 0; _i1693 < _map1690.size; ++_i1693) + org.apache.thrift.protocol.TMap _map1698 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1698.size); + String _key1699; + String _val1700; + for (int _i1701 = 0; _i1701 < _map1698.size; ++_i1701) { - _key1691 = iprot.readString(); - _val1692 = iprot.readString(); - struct.part_vals.put(_key1691, _val1692); + _key1699 = iprot.readString(); + _val1700 = iprot.readString(); + struct.part_vals.put(_key1699, _val1700); } iprot.readMapEnd(); } @@ -145315,10 +146452,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1694 : struct.part_vals.entrySet()) + for (Map.Entry _iter1702 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1694.getKey()); - oprot.writeString(_iter1694.getValue()); + oprot.writeString(_iter1702.getKey()); + oprot.writeString(_iter1702.getValue()); } oprot.writeMapEnd(); } @@ -145369,10 +146506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1695 : struct.part_vals.entrySet()) + for (Map.Entry _iter1703 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1695.getKey()); - oprot.writeString(_iter1695.getValue()); + oprot.writeString(_iter1703.getKey()); + oprot.writeString(_iter1703.getValue()); } } } @@ -145395,15 +146532,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1696 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1696.size); - String _key1697; - String _val1698; - for (int _i1699 = 0; _i1699 < _map1696.size; ++_i1699) + org.apache.thrift.protocol.TMap _map1704 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1704.size); + 
String _key1705; + String _val1706; + for (int _i1707 = 0; _i1707 < _map1704.size; ++_i1707) { - _key1697 = iprot.readString(); - _val1698 = iprot.readString(); - struct.part_vals.put(_key1697, _val1698); + _key1705 = iprot.readString(); + _val1706 = iprot.readString(); + struct.part_vals.put(_key1705, _val1706); } } struct.setPart_valsIsSet(true); @@ -170267,13 +171404,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1700 = iprot.readListBegin(); - struct.success = new ArrayList(_list1700.size); - String _elem1701; - for (int _i1702 = 0; _i1702 < _list1700.size; ++_i1702) + org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); + struct.success = new ArrayList(_list1708.size); + String _elem1709; + for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) { - _elem1701 = iprot.readString(); - struct.success.add(_elem1701); + _elem1709 = iprot.readString(); + struct.success.add(_elem1709); } iprot.readListEnd(); } @@ -170308,9 +171445,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1703 : struct.success) + for (String _iter1711 : struct.success) { - oprot.writeString(_iter1703); + oprot.writeString(_iter1711); } oprot.writeListEnd(); } @@ -170349,9 +171486,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1704 : struct.success) + for (String _iter1712 : struct.success) { - oprot.writeString(_iter1704); + oprot.writeString(_iter1712); } } } @@ -170366,13 +171503,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1705.size); - String _elem1706; - for (int _i1707 = 0; _i1707 < _list1705.size; ++_i1707) + org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1713.size); + String _elem1714; + for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) { - _elem1706 = iprot.readString(); - struct.success.add(_elem1706); + _elem1714 = iprot.readString(); + struct.success.add(_elem1714); } } struct.setSuccessIsSet(true); @@ -174427,13 +175564,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); - struct.success = new ArrayList(_list1708.size); - String _elem1709; - for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) + org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); + struct.success = new ArrayList(_list1716.size); + String _elem1717; + for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) { - _elem1709 = iprot.readString(); - struct.success.add(_elem1709); + _elem1717 = iprot.readString(); + struct.success.add(_elem1717); } iprot.readListEnd(); } @@ -174468,9 +175605,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1711 : struct.success) + for (String _iter1719 : struct.success) { - oprot.writeString(_iter1711); + oprot.writeString(_iter1719); } oprot.writeListEnd(); } @@ -174509,9 +175646,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1712 : struct.success) + for (String _iter1720 : struct.success) { - oprot.writeString(_iter1712); + oprot.writeString(_iter1720); } } } @@ -174526,13 +175663,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1713.size); - String _elem1714; - for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) + org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1721.size); + String _elem1722; + for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) { - _elem1714 = iprot.readString(); - struct.success.add(_elem1714); + _elem1722 = iprot.readString(); + struct.success.add(_elem1722); } } struct.setSuccessIsSet(true); @@ -177823,14 +178960,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); - struct.success = new ArrayList(_list1716.size); - Role _elem1717; - for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) + org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); + struct.success = new ArrayList(_list1724.size); + Role _elem1725; + for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) { - _elem1717 = new Role(); - _elem1717.read(iprot); - struct.success.add(_elem1717); + _elem1725 = new Role(); + _elem1725.read(iprot); + struct.success.add(_elem1725); } iprot.readListEnd(); } @@ -177865,9 +179002,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1719 : struct.success) + for (Role _iter1727 : struct.success) { - _iter1719.write(oprot); + _iter1727.write(oprot); } oprot.writeListEnd(); } @@ -177906,9 +179043,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1720 : struct.success) + for (Role _iter1728 : struct.success) { - _iter1720.write(oprot); + _iter1728.write(oprot); } } } @@ -177923,14 +179060,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1721.size); - Role 
_elem1722; - for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) + org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1729.size); + Role _elem1730; + for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) { - _elem1722 = new Role(); - _elem1722.read(iprot); - struct.success.add(_elem1722); + _elem1730 = new Role(); + _elem1730.read(iprot); + struct.success.add(_elem1730); } } struct.setSuccessIsSet(true); @@ -180935,13 +182072,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1724.size); - String _elem1725; - for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) + org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1732.size); + String _elem1733; + for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) { - _elem1725 = iprot.readString(); - struct.group_names.add(_elem1725); + _elem1733 = iprot.readString(); + struct.group_names.add(_elem1733); } iprot.readListEnd(); } @@ -180977,9 +182114,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1727 : struct.group_names) + for (String _iter1735 : struct.group_names) { - oprot.writeString(_iter1727); + oprot.writeString(_iter1735); } oprot.writeListEnd(); } @@ -181022,9 +182159,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1728 : struct.group_names) + for (String _iter1736 : struct.group_names) { - oprot.writeString(_iter1728); + oprot.writeString(_iter1736); } } } @@ -181045,13 +182182,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1729.size); - String _elem1730; - for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) + org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1737.size); + String _elem1738; + for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) { - _elem1730 = iprot.readString(); - struct.group_names.add(_elem1730); + _elem1738 = iprot.readString(); + struct.group_names.add(_elem1738); } } struct.setGroup_namesIsSet(true); @@ -182509,14 +183646,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); - struct.success = new ArrayList(_list1732.size); - HiveObjectPrivilege _elem1733; - for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) + org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); + struct.success = new ArrayList(_list1740.size); + HiveObjectPrivilege 
_elem1741; + for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) { - _elem1733 = new HiveObjectPrivilege(); - _elem1733.read(iprot); - struct.success.add(_elem1733); + _elem1741 = new HiveObjectPrivilege(); + _elem1741.read(iprot); + struct.success.add(_elem1741); } iprot.readListEnd(); } @@ -182551,9 +183688,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1735 : struct.success) + for (HiveObjectPrivilege _iter1743 : struct.success) { - _iter1735.write(oprot); + _iter1743.write(oprot); } oprot.writeListEnd(); } @@ -182592,9 +183729,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1736 : struct.success) + for (HiveObjectPrivilege _iter1744 : struct.success) { - _iter1736.write(oprot); + _iter1744.write(oprot); } } } @@ -182609,14 +183746,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1737.size); - HiveObjectPrivilege _elem1738; - for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) + org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1745.size); + HiveObjectPrivilege _elem1746; + for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) { - _elem1738 = new HiveObjectPrivilege(); - _elem1738.read(iprot); - struct.success.add(_elem1738); + _elem1746 = new HiveObjectPrivilege(); + _elem1746.read(iprot); + struct.success.add(_elem1746); } } struct.setSuccessIsSet(true); @@ -186563,13 +187700,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1740.size); - String _elem1741; - for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) + org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1748.size); + String _elem1749; + for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) { - _elem1741 = iprot.readString(); - struct.group_names.add(_elem1741); + _elem1749 = iprot.readString(); + struct.group_names.add(_elem1749); } iprot.readListEnd(); } @@ -186600,9 +187737,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1743 : struct.group_names) + for (String _iter1751 : struct.group_names) { - oprot.writeString(_iter1743); + oprot.writeString(_iter1751); } oprot.writeListEnd(); } @@ -186639,9 +187776,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1744 : struct.group_names) + 
for (String _iter1752 : struct.group_names) { - oprot.writeString(_iter1744); + oprot.writeString(_iter1752); } } } @@ -186657,13 +187794,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1745.size); - String _elem1746; - for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) + org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1753.size); + String _elem1754; + for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) { - _elem1746 = iprot.readString(); - struct.group_names.add(_elem1746); + _elem1754 = iprot.readString(); + struct.group_names.add(_elem1754); } } struct.setGroup_namesIsSet(true); @@ -187066,13 +188203,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); - struct.success = new ArrayList(_list1748.size); - String _elem1749; - for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) + org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); + struct.success = new ArrayList(_list1756.size); + String _elem1757; + for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) { - _elem1749 = iprot.readString(); - struct.success.add(_elem1749); + _elem1757 = iprot.readString(); + struct.success.add(_elem1757); } iprot.readListEnd(); } @@ -187107,9 +188244,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1751 : struct.success) + for (String _iter1759 : struct.success) { - oprot.writeString(_iter1751); + oprot.writeString(_iter1759); } oprot.writeListEnd(); } @@ -187148,9 +188285,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1752 : struct.success) + for (String _iter1760 : struct.success) { - oprot.writeString(_iter1752); + oprot.writeString(_iter1760); } } } @@ -187165,13 +188302,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1753.size); - String _elem1754; - for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) + org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1761.size); + String _elem1762; + for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) { - _elem1754 = iprot.readString(); - struct.success.add(_elem1754); + _elem1762 = iprot.readString(); + struct.success.add(_elem1762); } } struct.setSuccessIsSet(true); @@ -192462,13 +193599,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type 
== org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); - struct.success = new ArrayList(_list1756.size); - String _elem1757; - for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) + org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); + struct.success = new ArrayList(_list1764.size); + String _elem1765; + for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) { - _elem1757 = iprot.readString(); - struct.success.add(_elem1757); + _elem1765 = iprot.readString(); + struct.success.add(_elem1765); } iprot.readListEnd(); } @@ -192494,9 +193631,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1759 : struct.success) + for (String _iter1767 : struct.success) { - oprot.writeString(_iter1759); + oprot.writeString(_iter1767); } oprot.writeListEnd(); } @@ -192527,9 +193664,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1760 : struct.success) + for (String _iter1768 : struct.success) { - oprot.writeString(_iter1760); + oprot.writeString(_iter1768); } } } @@ -192541,13 +193678,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1761.size); - String _elem1762; - for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) + org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1769.size); + String _elem1770; + for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) { - _elem1762 = iprot.readString(); - struct.success.add(_elem1762); + _elem1770 = iprot.readString(); + struct.success.add(_elem1770); } } struct.setSuccessIsSet(true); @@ -195577,13 +196714,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); - struct.success = new ArrayList(_list1764.size); - String _elem1765; - for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) + org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); + struct.success = new ArrayList(_list1772.size); + String _elem1773; + for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) { - _elem1765 = iprot.readString(); - struct.success.add(_elem1765); + _elem1773 = iprot.readString(); + struct.success.add(_elem1773); } iprot.readListEnd(); } @@ -195609,9 +196746,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1767 : struct.success) + for (String _iter1775 : struct.success) { - oprot.writeString(_iter1767); + oprot.writeString(_iter1775); } oprot.writeListEnd(); } @@ -195642,9 +196779,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1768 : struct.success) + for (String _iter1776 : struct.success) { - oprot.writeString(_iter1768); + oprot.writeString(_iter1776); } } } @@ -195656,13 +196793,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1769.size); - String _elem1770; - for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) + org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1777.size); + String _elem1778; + for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) { - _elem1770 = iprot.readString(); - struct.success.add(_elem1770); + _elem1778 = iprot.readString(); + struct.success.add(_elem1778); } } struct.setSuccessIsSet(true); @@ -212783,13 +213920,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); - struct.success = new ArrayList(_list1772.size); - String _elem1773; - for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) + org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); + struct.success = new ArrayList(_list1780.size); + String _elem1781; + for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) { - _elem1773 = iprot.readString(); - struct.success.add(_elem1773); + _elem1781 = iprot.readString(); + struct.success.add(_elem1781); } iprot.readListEnd(); } @@ -212815,9 +213952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1775 : struct.success) + for (String _iter1783 : struct.success) { - oprot.writeString(_iter1775); + oprot.writeString(_iter1783); } oprot.writeListEnd(); } @@ -212848,9 +213985,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1776 : struct.success) + for (String _iter1784 : struct.success) { - oprot.writeString(_iter1776); + oprot.writeString(_iter1784); } } } @@ -212862,13 +213999,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1777.size); - String _elem1778; - for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) + org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1785.size); + String _elem1786; + for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) { - _elem1778 = iprot.readString(); - struct.success.add(_elem1778); + _elem1786 = iprot.readString(); + 
struct.success.add(_elem1786); } } struct.setSuccessIsSet(true); @@ -249754,14 +250891,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); - struct.success = new ArrayList(_list1780.size); - SchemaVersion _elem1781; - for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) + org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); + struct.success = new ArrayList(_list1788.size); + SchemaVersion _elem1789; + for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) { - _elem1781 = new SchemaVersion(); - _elem1781.read(iprot); - struct.success.add(_elem1781); + _elem1789 = new SchemaVersion(); + _elem1789.read(iprot); + struct.success.add(_elem1789); } iprot.readListEnd(); } @@ -249805,9 +250942,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1783 : struct.success) + for (SchemaVersion _iter1791 : struct.success) { - _iter1783.write(oprot); + _iter1791.write(oprot); } oprot.writeListEnd(); } @@ -249854,9 +250991,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1784 : struct.success) + for (SchemaVersion _iter1792 : struct.success) { - _iter1784.write(oprot); + _iter1792.write(oprot); } } } @@ -249874,14 +251011,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1785.size); - SchemaVersion _elem1786; - for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) + org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1793.size); + SchemaVersion _elem1794; + for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) { - _elem1786 = new SchemaVersion(); - _elem1786.read(iprot); - struct.success.add(_elem1786); + _elem1794 = new SchemaVersion(); + _elem1794.read(iprot); + struct.success.add(_elem1794); } } struct.setSuccessIsSet(true); @@ -258424,14 +259561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); - struct.success = new ArrayList(_list1788.size); - RuntimeStat _elem1789; - for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) + org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); + struct.success = new ArrayList(_list1796.size); + RuntimeStat _elem1797; + for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) { - _elem1789 = new RuntimeStat(); - _elem1789.read(iprot); - struct.success.add(_elem1789); + _elem1797 = new RuntimeStat(); + _elem1797.read(iprot); + struct.success.add(_elem1797); } iprot.readListEnd(); } @@ -258466,9 +259603,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ 
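The hunks above and immediately below are mechanical churn in the generated Java serializers: the _listNNNN, _elemNNNN, _iNNNN and _iterNNNN temporaries all shift by a constant offset (eight in this file) because the structures generated for the new get_partition_names_req call are numbered ahead of them; the serialization logic itself is unchanged. Every renumbered read loop follows the same generated Thrift list-deserialization shape, sketched here by hand in Java with illustrative names (iprot is assumed to be the usual org.apache.thrift.protocol.TProtocol):

// Sketch of the pattern behind each renumbered read loop: read the list
// header, then deserialize one element per iteration. Names are illustrative,
// not the generated ones.
org.apache.thrift.protocol.TList header = iprot.readListBegin();
java.util.List<String> success = new java.util.ArrayList<>(header.size);
for (int i = 0; i < header.size; ++i) {
  success.add(iprot.readString());
}
iprot.readListEnd();

The tuple-scheme variants construct the TList from iprot.readI32() instead of calling readListBegin(), which is why both forms appear among the renumbered hunks.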
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-              for (RuntimeStat _iter1791 : struct.success)
+              for (RuntimeStat _iter1799 : struct.success)
              {
-                _iter1791.write(oprot);
+                _iter1799.write(oprot);
              }
              oprot.writeListEnd();
            }
@@ -258507,9 +259644,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r
        if (struct.isSetSuccess()) {
          {
            oprot.writeI32(struct.success.size());
-            for (RuntimeStat _iter1792 : struct.success)
+            for (RuntimeStat _iter1800 : struct.success)
            {
-              _iter1792.write(oprot);
+              _iter1800.write(oprot);
            }
          }
        }
@@ -258524,14 +259661,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re
        BitSet incoming = iprot.readBitSet(2);
        if (incoming.get(0)) {
          {
-            org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList(_list1793.size);
-            RuntimeStat _elem1794;
-            for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795)
+            org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList(_list1801.size);
+            RuntimeStat _elem1802;
+            for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803)
            {
-              _elem1794 = new RuntimeStat();
-              _elem1794.read(iprot);
-              struct.success.add(_elem1794);
+              _elem1802 = new RuntimeStat();
+              _elem1802.read(iprot);
+              struct.success.add(_elem1802);
            }
          }
          struct.setSuccessIsSet(true);
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index f1cb36ec4f..458ebcf2bb 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -691,6 +691,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    * @throws \metastore\NoSuchObjectException
    */
   public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts);
+  /**
+   * @param \metastore\PartitionsByExprRequest $req
+   * @return string[]
+   * @throws \metastore\MetaException
+   * @throws \metastore\NoSuchObjectException
+   */
+  public function get_partition_names_req(\metastore\PartitionsByExprRequest $req);
   /**
    * @param string $db_name
    * @param string $tbl_name
@@ -6476,6 +6483,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("get_partition_names_ps failed: unknown result");
   }
 
+  public function get_partition_names_req(\metastore\PartitionsByExprRequest $req)
+  {
+    $this->send_get_partition_names_req($req);
+    return $this->recv_get_partition_names_req();
+  }
+
+  public function send_get_partition_names_req(\metastore\PartitionsByExprRequest $req)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_partition_names_req_args();
+    $args->req = $req;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_partition_names_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_partition_names_req', TMessageType::CALL, $this->seqid_);
$args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_partition_names_req() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_partition_names_req_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_partition_names_req_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_partition_names_req failed: unknown result"); + } + public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts) { $this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); @@ -35126,6 +35190,237 @@ class ThriftHiveMetastore_get_partition_names_ps_result { } +class ThriftHiveMetastore_get_partition_names_req_args { + static $_TSPEC; + + /** + * @var \metastore\PartitionsByExprRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\PartitionsByExprRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partition_names_req_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\PartitionsByExprRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_names_req_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_partition_names_req_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 
'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partition_names_req_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size1406 = 0; + $_etype1409 = 0; + $xfer += $input->readListBegin($_etype1409, $_size1406); + for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) + { + $elem1411 = null; + $xfer += $input->readString($elem1411); + $this->success []= $elem1411; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\NoSuchObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_names_req_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter1412) + { + $xfer += $output->writeString($iter1412); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_partitions_by_filter_args { static $_TSPEC; @@ -35345,15 +35640,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1406 = 0; - $_etype1409 = 0; - $xfer += $input->readListBegin($_etype1409, $_size1406); - for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) + $_size1413 = 0; + $_etype1416 = 0; + $xfer += $input->readListBegin($_etype1416, $_size1413); + for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) { - $elem1411 = null; - $elem1411 = new \metastore\Partition(); - $xfer += $elem1411->read($input); - $this->success 
[]= $elem1411; + $elem1418 = null; + $elem1418 = new \metastore\Partition(); + $xfer += $elem1418->read($input); + $this->success []= $elem1418; } $xfer += $input->readListEnd(); } else { @@ -35397,9 +35692,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1412) + foreach ($this->success as $iter1419) { - $xfer += $iter1412->write($output); + $xfer += $iter1419->write($output); } } $output->writeListEnd(); @@ -35642,15 +35937,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1413 = 0; - $_etype1416 = 0; - $xfer += $input->readListBegin($_etype1416, $_size1413); - for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) + $_size1420 = 0; + $_etype1423 = 0; + $xfer += $input->readListBegin($_etype1423, $_size1420); + for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) { - $elem1418 = null; - $elem1418 = new \metastore\PartitionSpec(); - $xfer += $elem1418->read($input); - $this->success []= $elem1418; + $elem1425 = null; + $elem1425 = new \metastore\PartitionSpec(); + $xfer += $elem1425->read($input); + $this->success []= $elem1425; } $xfer += $input->readListEnd(); } else { @@ -35694,9 +35989,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1419) + foreach ($this->success as $iter1426) { - $xfer += $iter1419->write($output); + $xfer += $iter1426->write($output); } } $output->writeListEnd(); @@ -36262,14 +36557,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1420 = 0; - $_etype1423 = 0; - $xfer += $input->readListBegin($_etype1423, $_size1420); - for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) + $_size1427 = 0; + $_etype1430 = 0; + $xfer += $input->readListBegin($_etype1430, $_size1427); + for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) { - $elem1425 = null; - $xfer += $input->readString($elem1425); - $this->names []= $elem1425; + $elem1432 = null; + $xfer += $input->readString($elem1432); + $this->names []= $elem1432; } $xfer += $input->readListEnd(); } else { @@ -36307,9 +36602,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1426) + foreach ($this->names as $iter1433) { - $xfer += $output->writeString($iter1426); + $xfer += $output->writeString($iter1433); } } $output->writeListEnd(); @@ -36398,15 +36693,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1427 = 0; - $_etype1430 = 0; - $xfer += $input->readListBegin($_etype1430, $_size1427); - for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) + $_size1434 = 0; + $_etype1437 = 0; + $xfer += $input->readListBegin($_etype1437, $_size1434); + for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) { - $elem1432 = null; - $elem1432 = new \metastore\Partition(); - $xfer += $elem1432->read($input); - $this->success []= $elem1432; + $elem1439 = null; + $elem1439 = new \metastore\Partition(); + $xfer += $elem1439->read($input); + $this->success []= $elem1439; } $xfer += $input->readListEnd(); } else { @@ -36450,9 +36745,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach 
($this->success as $iter1433) + foreach ($this->success as $iter1440) { - $xfer += $iter1433->write($output); + $xfer += $iter1440->write($output); } } $output->writeListEnd(); @@ -37001,15 +37296,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1434 = 0; - $_etype1437 = 0; - $xfer += $input->readListBegin($_etype1437, $_size1434); - for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) + $_size1441 = 0; + $_etype1444 = 0; + $xfer += $input->readListBegin($_etype1444, $_size1441); + for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) { - $elem1439 = null; - $elem1439 = new \metastore\Partition(); - $xfer += $elem1439->read($input); - $this->new_parts []= $elem1439; + $elem1446 = null; + $elem1446 = new \metastore\Partition(); + $xfer += $elem1446->read($input); + $this->new_parts []= $elem1446; } $xfer += $input->readListEnd(); } else { @@ -37047,9 +37342,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1440) + foreach ($this->new_parts as $iter1447) { - $xfer += $iter1440->write($output); + $xfer += $iter1447->write($output); } } $output->writeListEnd(); @@ -37264,15 +37559,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1441 = 0; - $_etype1444 = 0; - $xfer += $input->readListBegin($_etype1444, $_size1441); - for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) + $_size1448 = 0; + $_etype1451 = 0; + $xfer += $input->readListBegin($_etype1451, $_size1448); + for ($_i1452 = 0; $_i1452 < $_size1448; ++$_i1452) { - $elem1446 = null; - $elem1446 = new \metastore\Partition(); - $xfer += $elem1446->read($input); - $this->new_parts []= $elem1446; + $elem1453 = null; + $elem1453 = new \metastore\Partition(); + $xfer += $elem1453->read($input); + $this->new_parts []= $elem1453; } $xfer += $input->readListEnd(); } else { @@ -37318,9 +37613,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1447) + foreach ($this->new_parts as $iter1454) { - $xfer += $iter1447->write($output); + $xfer += $iter1454->write($output); } } $output->writeListEnd(); @@ -38008,14 +38303,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1448 = 0; - $_etype1451 = 0; - $xfer += $input->readListBegin($_etype1451, $_size1448); - for ($_i1452 = 0; $_i1452 < $_size1448; ++$_i1452) + $_size1455 = 0; + $_etype1458 = 0; + $xfer += $input->readListBegin($_etype1458, $_size1455); + for ($_i1459 = 0; $_i1459 < $_size1455; ++$_i1459) { - $elem1453 = null; - $xfer += $input->readString($elem1453); - $this->part_vals []= $elem1453; + $elem1460 = null; + $xfer += $input->readString($elem1460); + $this->part_vals []= $elem1460; } $xfer += $input->readListEnd(); } else { @@ -38061,9 +38356,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1454) + foreach ($this->part_vals as $iter1461) { - $xfer += $output->writeString($iter1454); + $xfer += $output->writeString($iter1461); } } $output->writeListEnd(); @@ -38458,14 +38753,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { 
$this->part_vals = array(); - $_size1455 = 0; - $_etype1458 = 0; - $xfer += $input->readListBegin($_etype1458, $_size1455); - for ($_i1459 = 0; $_i1459 < $_size1455; ++$_i1459) + $_size1462 = 0; + $_etype1465 = 0; + $xfer += $input->readListBegin($_etype1465, $_size1462); + for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) { - $elem1460 = null; - $xfer += $input->readString($elem1460); - $this->part_vals []= $elem1460; + $elem1467 = null; + $xfer += $input->readString($elem1467); + $this->part_vals []= $elem1467; } $xfer += $input->readListEnd(); } else { @@ -38500,9 +38795,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1461) + foreach ($this->part_vals as $iter1468) { - $xfer += $output->writeString($iter1461); + $xfer += $output->writeString($iter1468); } } $output->writeListEnd(); @@ -38956,14 +39251,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1462 = 0; - $_etype1465 = 0; - $xfer += $input->readListBegin($_etype1465, $_size1462); - for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) + $_size1469 = 0; + $_etype1472 = 0; + $xfer += $input->readListBegin($_etype1472, $_size1469); + for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) { - $elem1467 = null; - $xfer += $input->readString($elem1467); - $this->success []= $elem1467; + $elem1474 = null; + $xfer += $input->readString($elem1474); + $this->success []= $elem1474; } $xfer += $input->readListEnd(); } else { @@ -38999,9 +39294,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1468) + foreach ($this->success as $iter1475) { - $xfer += $output->writeString($iter1468); + $xfer += $output->writeString($iter1475); } } $output->writeListEnd(); @@ -39161,17 +39456,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1469 = 0; - $_ktype1470 = 0; - $_vtype1471 = 0; - $xfer += $input->readMapBegin($_ktype1470, $_vtype1471, $_size1469); - for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) + $_size1476 = 0; + $_ktype1477 = 0; + $_vtype1478 = 0; + $xfer += $input->readMapBegin($_ktype1477, $_vtype1478, $_size1476); + for ($_i1480 = 0; $_i1480 < $_size1476; ++$_i1480) { - $key1474 = ''; - $val1475 = ''; - $xfer += $input->readString($key1474); - $xfer += $input->readString($val1475); - $this->success[$key1474] = $val1475; + $key1481 = ''; + $val1482 = ''; + $xfer += $input->readString($key1481); + $xfer += $input->readString($val1482); + $this->success[$key1481] = $val1482; } $xfer += $input->readMapEnd(); } else { @@ -39207,10 +39502,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1476 => $viter1477) + foreach ($this->success as $kiter1483 => $viter1484) { - $xfer += $output->writeString($kiter1476); - $xfer += $output->writeString($viter1477); + $xfer += $output->writeString($kiter1483); + $xfer += $output->writeString($viter1484); } } $output->writeMapEnd(); @@ -39330,17 +39625,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1478 = 0; - $_ktype1479 = 0; - $_vtype1480 = 0; - $xfer += $input->readMapBegin($_ktype1479, 
$_vtype1480, $_size1478); - for ($_i1482 = 0; $_i1482 < $_size1478; ++$_i1482) + $_size1485 = 0; + $_ktype1486 = 0; + $_vtype1487 = 0; + $xfer += $input->readMapBegin($_ktype1486, $_vtype1487, $_size1485); + for ($_i1489 = 0; $_i1489 < $_size1485; ++$_i1489) { - $key1483 = ''; - $val1484 = ''; - $xfer += $input->readString($key1483); - $xfer += $input->readString($val1484); - $this->part_vals[$key1483] = $val1484; + $key1490 = ''; + $val1491 = ''; + $xfer += $input->readString($key1490); + $xfer += $input->readString($val1491); + $this->part_vals[$key1490] = $val1491; } $xfer += $input->readMapEnd(); } else { @@ -39385,10 +39680,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1485 => $viter1486) + foreach ($this->part_vals as $kiter1492 => $viter1493) { - $xfer += $output->writeString($kiter1485); - $xfer += $output->writeString($viter1486); + $xfer += $output->writeString($kiter1492); + $xfer += $output->writeString($viter1493); } } $output->writeMapEnd(); @@ -39710,17 +40005,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1487 = 0; - $_ktype1488 = 0; - $_vtype1489 = 0; - $xfer += $input->readMapBegin($_ktype1488, $_vtype1489, $_size1487); - for ($_i1491 = 0; $_i1491 < $_size1487; ++$_i1491) + $_size1494 = 0; + $_ktype1495 = 0; + $_vtype1496 = 0; + $xfer += $input->readMapBegin($_ktype1495, $_vtype1496, $_size1494); + for ($_i1498 = 0; $_i1498 < $_size1494; ++$_i1498) { - $key1492 = ''; - $val1493 = ''; - $xfer += $input->readString($key1492); - $xfer += $input->readString($val1493); - $this->part_vals[$key1492] = $val1493; + $key1499 = ''; + $val1500 = ''; + $xfer += $input->readString($key1499); + $xfer += $input->readString($val1500); + $this->part_vals[$key1499] = $val1500; } $xfer += $input->readMapEnd(); } else { @@ -39765,10 +40060,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1494 => $viter1495) + foreach ($this->part_vals as $kiter1501 => $viter1502) { - $xfer += $output->writeString($kiter1494); - $xfer += $output->writeString($viter1495); + $xfer += $output->writeString($kiter1501); + $xfer += $output->writeString($viter1502); } } $output->writeMapEnd(); @@ -45293,14 +45588,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1496 = 0; - $_etype1499 = 0; - $xfer += $input->readListBegin($_etype1499, $_size1496); - for ($_i1500 = 0; $_i1500 < $_size1496; ++$_i1500) + $_size1503 = 0; + $_etype1506 = 0; + $xfer += $input->readListBegin($_etype1506, $_size1503); + for ($_i1507 = 0; $_i1507 < $_size1503; ++$_i1507) { - $elem1501 = null; - $xfer += $input->readString($elem1501); - $this->success []= $elem1501; + $elem1508 = null; + $xfer += $input->readString($elem1508); + $this->success []= $elem1508; } $xfer += $input->readListEnd(); } else { @@ -45336,9 +45631,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1502) + foreach ($this->success as $iter1509) { - $xfer += $output->writeString($iter1502); + $xfer += $output->writeString($iter1509); } } $output->writeListEnd(); @@ -46207,14 +46502,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: 
if ($ftype == TType::LST) { $this->success = array(); - $_size1503 = 0; - $_etype1506 = 0; - $xfer += $input->readListBegin($_etype1506, $_size1503); - for ($_i1507 = 0; $_i1507 < $_size1503; ++$_i1507) + $_size1510 = 0; + $_etype1513 = 0; + $xfer += $input->readListBegin($_etype1513, $_size1510); + for ($_i1514 = 0; $_i1514 < $_size1510; ++$_i1514) { - $elem1508 = null; - $xfer += $input->readString($elem1508); - $this->success []= $elem1508; + $elem1515 = null; + $xfer += $input->readString($elem1515); + $this->success []= $elem1515; } $xfer += $input->readListEnd(); } else { @@ -46250,9 +46545,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1509) + foreach ($this->success as $iter1516) { - $xfer += $output->writeString($iter1509); + $xfer += $output->writeString($iter1516); } } $output->writeListEnd(); @@ -46943,15 +47238,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1510 = 0; - $_etype1513 = 0; - $xfer += $input->readListBegin($_etype1513, $_size1510); - for ($_i1514 = 0; $_i1514 < $_size1510; ++$_i1514) + $_size1517 = 0; + $_etype1520 = 0; + $xfer += $input->readListBegin($_etype1520, $_size1517); + for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) { - $elem1515 = null; - $elem1515 = new \metastore\Role(); - $xfer += $elem1515->read($input); - $this->success []= $elem1515; + $elem1522 = null; + $elem1522 = new \metastore\Role(); + $xfer += $elem1522->read($input); + $this->success []= $elem1522; } $xfer += $input->readListEnd(); } else { @@ -46987,9 +47282,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1516) + foreach ($this->success as $iter1523) { - $xfer += $iter1516->write($output); + $xfer += $iter1523->write($output); } } $output->writeListEnd(); @@ -47651,14 +47946,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1517 = 0; - $_etype1520 = 0; - $xfer += $input->readListBegin($_etype1520, $_size1517); - for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) + $_size1524 = 0; + $_etype1527 = 0; + $xfer += $input->readListBegin($_etype1527, $_size1524); + for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) { - $elem1522 = null; - $xfer += $input->readString($elem1522); - $this->group_names []= $elem1522; + $elem1529 = null; + $xfer += $input->readString($elem1529); + $this->group_names []= $elem1529; } $xfer += $input->readListEnd(); } else { @@ -47699,9 +47994,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1523) + foreach ($this->group_names as $iter1530) { - $xfer += $output->writeString($iter1523); + $xfer += $output->writeString($iter1530); } } $output->writeListEnd(); @@ -48009,15 +48304,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1524 = 0; - $_etype1527 = 0; - $xfer += $input->readListBegin($_etype1527, $_size1524); - for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) + $_size1531 = 0; + $_etype1534 = 0; + $xfer += $input->readListBegin($_etype1534, $_size1531); + for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) { - $elem1529 = null; - $elem1529 = new \metastore\HiveObjectPrivilege(); - $xfer += 
$elem1529->read($input); - $this->success []= $elem1529; + $elem1536 = null; + $elem1536 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1536->read($input); + $this->success []= $elem1536; } $xfer += $input->readListEnd(); } else { @@ -48053,9 +48348,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1530) + foreach ($this->success as $iter1537) { - $xfer += $iter1530->write($output); + $xfer += $iter1537->write($output); } } $output->writeListEnd(); @@ -48923,14 +49218,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1531 = 0; - $_etype1534 = 0; - $xfer += $input->readListBegin($_etype1534, $_size1531); - for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) + $_size1538 = 0; + $_etype1541 = 0; + $xfer += $input->readListBegin($_etype1541, $_size1538); + for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) { - $elem1536 = null; - $xfer += $input->readString($elem1536); - $this->group_names []= $elem1536; + $elem1543 = null; + $xfer += $input->readString($elem1543); + $this->group_names []= $elem1543; } $xfer += $input->readListEnd(); } else { @@ -48963,9 +49258,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1537) + foreach ($this->group_names as $iter1544) { - $xfer += $output->writeString($iter1537); + $xfer += $output->writeString($iter1544); } } $output->writeListEnd(); @@ -49041,14 +49336,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1538 = 0; - $_etype1541 = 0; - $xfer += $input->readListBegin($_etype1541, $_size1538); - for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) + $_size1545 = 0; + $_etype1548 = 0; + $xfer += $input->readListBegin($_etype1548, $_size1545); + for ($_i1549 = 0; $_i1549 < $_size1545; ++$_i1549) { - $elem1543 = null; - $xfer += $input->readString($elem1543); - $this->success []= $elem1543; + $elem1550 = null; + $xfer += $input->readString($elem1550); + $this->success []= $elem1550; } $xfer += $input->readListEnd(); } else { @@ -49084,9 +49379,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1544) + foreach ($this->success as $iter1551) { - $xfer += $output->writeString($iter1544); + $xfer += $output->writeString($iter1551); } } $output->writeListEnd(); @@ -50203,14 +50498,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1545 = 0; - $_etype1548 = 0; - $xfer += $input->readListBegin($_etype1548, $_size1545); - for ($_i1549 = 0; $_i1549 < $_size1545; ++$_i1549) + $_size1552 = 0; + $_etype1555 = 0; + $xfer += $input->readListBegin($_etype1555, $_size1552); + for ($_i1556 = 0; $_i1556 < $_size1552; ++$_i1556) { - $elem1550 = null; - $xfer += $input->readString($elem1550); - $this->success []= $elem1550; + $elem1557 = null; + $xfer += $input->readString($elem1557); + $this->success []= $elem1557; } $xfer += $input->readListEnd(); } else { @@ -50238,9 +50533,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1551) + foreach ($this->success as $iter1558) { - $xfer += $output->writeString($iter1551); + $xfer += 
$output->writeString($iter1558); } } $output->writeListEnd(); @@ -50879,14 +51174,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1552 = 0; - $_etype1555 = 0; - $xfer += $input->readListBegin($_etype1555, $_size1552); - for ($_i1556 = 0; $_i1556 < $_size1552; ++$_i1556) + $_size1559 = 0; + $_etype1562 = 0; + $xfer += $input->readListBegin($_etype1562, $_size1559); + for ($_i1563 = 0; $_i1563 < $_size1559; ++$_i1563) { - $elem1557 = null; - $xfer += $input->readString($elem1557); - $this->success []= $elem1557; + $elem1564 = null; + $xfer += $input->readString($elem1564); + $this->success []= $elem1564; } $xfer += $input->readListEnd(); } else { @@ -50914,9 +51209,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1558) + foreach ($this->success as $iter1565) { - $xfer += $output->writeString($iter1558); + $xfer += $output->writeString($iter1565); } } $output->writeListEnd(); @@ -54670,14 +54965,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1559 = 0; - $_etype1562 = 0; - $xfer += $input->readListBegin($_etype1562, $_size1559); - for ($_i1563 = 0; $_i1563 < $_size1559; ++$_i1563) + $_size1566 = 0; + $_etype1569 = 0; + $xfer += $input->readListBegin($_etype1569, $_size1566); + for ($_i1570 = 0; $_i1570 < $_size1566; ++$_i1570) { - $elem1564 = null; - $xfer += $input->readString($elem1564); - $this->success []= $elem1564; + $elem1571 = null; + $xfer += $input->readString($elem1571); + $this->success []= $elem1571; } $xfer += $input->readListEnd(); } else { @@ -54705,9 +55000,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1565) + foreach ($this->success as $iter1572) { - $xfer += $output->writeString($iter1565); + $xfer += $output->writeString($iter1572); } } $output->writeListEnd(); @@ -62878,15 +63173,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1566 = 0; - $_etype1569 = 0; - $xfer += $input->readListBegin($_etype1569, $_size1566); - for ($_i1570 = 0; $_i1570 < $_size1566; ++$_i1570) + $_size1573 = 0; + $_etype1576 = 0; + $xfer += $input->readListBegin($_etype1576, $_size1573); + for ($_i1577 = 0; $_i1577 < $_size1573; ++$_i1577) { - $elem1571 = null; - $elem1571 = new \metastore\SchemaVersion(); - $xfer += $elem1571->read($input); - $this->success []= $elem1571; + $elem1578 = null; + $elem1578 = new \metastore\SchemaVersion(); + $xfer += $elem1578->read($input); + $this->success []= $elem1578; } $xfer += $input->readListEnd(); } else { @@ -62930,9 +63225,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1572) + foreach ($this->success as $iter1579) { - $xfer += $iter1572->write($output); + $xfer += $iter1579->write($output); } } $output->writeListEnd(); @@ -64801,15 +65096,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1573 = 0; - $_etype1576 = 0; - $xfer += $input->readListBegin($_etype1576, $_size1573); - for ($_i1577 = 0; $_i1577 < $_size1573; ++$_i1577) + $_size1580 = 0; + $_etype1583 = 0; + $xfer += 
$input->readListBegin($_etype1583, $_size1580);
+            for ($_i1584 = 0; $_i1584 < $_size1580; ++$_i1584)
            {
-              $elem1578 = null;
-              $elem1578 = new \metastore\RuntimeStat();
-              $xfer += $elem1578->read($input);
-              $this->success []= $elem1578;
+              $elem1585 = null;
+              $elem1585 = new \metastore\RuntimeStat();
+              $xfer += $elem1585->read($input);
+              $this->success []= $elem1585;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -64845,9 +65140,9 @@ class ThriftHiveMetastore_get_runtime_stats_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1579)
+          foreach ($this->success as $iter1586)
          {
-            $xfer += $iter1579->write($output);
+            $xfer += $iter1586->write($output);
          }
        }
        $output->writeListEnd();
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 9fb7ff011a..8704783859 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -14270,6 +14270,10 @@ class PartitionsByExprRequest {
    * @var string
    */
  public $catName = null;
+  /**
+   * @var string
+   */
+  public $order = null;
 
  public function __construct($vals=null) {
    if (!isset(self::$_TSPEC)) {
@@ -14298,6 +14302,10 @@
          'var' => 'catName',
          'type' => TType::STRING,
          ),
+        7 => array(
+          'var' => 'order',
+          'type' => TType::STRING,
+          ),
        );
    }
    if (is_array($vals)) {
@@ -14319,6 +14327,9 @@
      if (isset($vals['catName'])) {
        $this->catName = $vals['catName'];
      }
+      if (isset($vals['order'])) {
+        $this->order = $vals['order'];
+      }
    }
  }
@@ -14383,6 +14394,13 @@
            $xfer += $input->skip($ftype);
          }
          break;
+        case 7:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->order);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
        default:
          $xfer += $input->skip($ftype);
          break;
@@ -14426,6 +14444,11 @@
      $xfer += $output->writeString($this->catName);
      $xfer += $output->writeFieldEnd();
    }
+    if ($this->order !== null) {
+      $xfer += $output->writeFieldBegin('order', TType::STRING, 7);
+      $xfer += $output->writeString($this->order);
+      $xfer += $output->writeFieldEnd();
+    }
    $xfer += $output->writeFieldStop();
    $xfer += $output->writeStructEnd();
    return $xfer;
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index b714b62f95..a0737c7adc 100755
--- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -106,6 +106,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
  print('  get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)')
  print('  get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)')
  print('  get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)')
+  print('  get_partition_names_req(PartitionsByExprRequest req)')
  print('  get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)')
  print('  get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)')
  print('  PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)')
@@ -816,6 +817,12 @@ elif cmd == 'get_partition_names_ps':
    sys.exit(1)
  pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
 
+elif cmd == 'get_partition_names_req':
+  if len(args) != 1:
+    print('get_partition_names_req requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_partition_names_req(eval(args[0]),))
+
elif cmd == 'get_partitions_by_filter':
  if len(args) != 4:
    print('get_partitions_by_filter requires 4 args')
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 37d5d03eed..cd5b139ae7 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -692,6 +692,13 @@ def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts):
    """
    pass
 
+  def get_partition_names_req(self, req):
+    """
+    Parameters:
+    - req
+    """
+    pass
+
  def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
    """
    Parameters:
@@ -4834,6 +4841,41 @@ def recv_get_partition_names_ps(self):
      raise result.o2
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result")
 
+  def get_partition_names_req(self, req):
+    """
+    Parameters:
+    - req
+    """
+    self.send_get_partition_names_req(req)
+    return self.recv_get_partition_names_req()
+
+  def send_get_partition_names_req(self, req):
+    self._oprot.writeMessageBegin('get_partition_names_req', TMessageType.CALL, self._seqid)
+    args = get_partition_names_req_args()
+    args.req = req
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_partition_names_req(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_partition_names_req_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_req failed: unknown result")
+
  def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
    """
    Parameters:
@@ -10064,6 +10106,7 @@ def __init__(self, handler):
    self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps
    self._processMap["get_partitions_ps_with_auth"] = Processor.process_get_partitions_ps_with_auth
    self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps
+    self._processMap["get_partition_names_req"] = Processor.process_get_partition_names_req
    self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter
    self._processMap["get_part_specs_by_filter"] = Processor.process_get_part_specs_by_filter
    self._processMap["get_partitions_by_expr"] = Processor.process_get_partitions_by_expr
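In every binding the new client call has the same shape: get_partition_names_req sends a single PartitionsByExprRequest and returns a list of partition-name strings, with MetaException and NoSuchObjectException as the declared failures (the o1/o2 slots of the result struct, filled in by the processor method that follows). A minimal sketch of the call through the generated Java client; client, exprBytes and orderSpec are assumptions here (a connected ThriftHiveMetastore.Client, a WHERE predicate serialized upstream, and an ORDER BY spec in whatever string form the metastore side of this patch defines), and the database and table names are illustrative:

// Assumed: client is a connected ThriftHiveMetastore.Client; exprBytes is the
// serialized partition predicate; orderSpec is the ORDER BY string.
PartitionsByExprRequest req = new PartitionsByExprRequest();
req.setDbName("default");                    // illustrative database
req.setTblName("web_logs");                  // illustrative partitioned table
req.setExpr(exprBytes);                      // pushed-down WHERE predicate
req.setDefaultPartitionName("__HIVE_DEFAULT_PARTITION__");
req.setOrder(orderSpec);                     // new field 7 of PartitionsByExprRequest
req.setMaxParts((short) 100);                // bounds the result, like LIMIT
java.util.List<String> names = client.get_partition_names_req(req);

The regenerated ThriftHiveMetastore-remote tool above exposes the same call from the shell, taking the request as a single Python literal that it eval()s before dispatch.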
@@ -12333,6 +12376,31 @@ def process_get_partition_names_ps(self, seqid, iprot, oprot):
    oprot.writeMessageEnd()
    oprot.trans.flush()
 
+  def process_get_partition_names_req(self, seqid, iprot, oprot):
+    args = get_partition_names_req_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_partition_names_req_result()
+    try:
+      result.success = self._handler.get_partition_names_req(args.req)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except MetaException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except NoSuchObjectException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_partition_names_req", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
  def process_get_partitions_by_filter(self, seqid, iprot, oprot):
    args = get_partitions_by_filter_args()
    args.read(iprot)
@@ -30563,6 +30631,172 @@ def __eq__(self, other):
  def __ne__(self, other):
    return not (self == other)
 
+class get_partition_names_req_args:
+  """
+  Attributes:
+  - req
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'req', (PartitionsByExprRequest, PartitionsByExprRequest.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, req=None,):
+    self.req = req
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.req = PartitionsByExprRequest()
+          self.req.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_names_req_args')
+    if self.req is not None:
+      oprot.writeFieldBegin('req', TType.STRUCT, 1)
+      self.req.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.req)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_partition_names_req_result:
+  """
+  Attributes:
+  - success
+  - o1
+  - o2
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, success=None, o1=None, o2=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype1406, _size1403) = iprot.readListBegin()
+          for _i1407 in xrange(_size1403):
+            _elem1408 = iprot.readString()
+            self.success.append(_elem1408)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = NoSuchObjectException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_names_req_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRING, len(self.success))
+      for iter1409 in self.success:
+        oprot.writeString(iter1409)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class get_partitions_by_filter_args:
  """
  Attributes:
@@ -30698,11 +30932,11 @@ def read(self, iprot):
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1406, _size1403) = iprot.readListBegin()
-          for _i1407 in xrange(_size1403):
-            _elem1408 = Partition()
-            _elem1408.read(iprot)
-            self.success.append(_elem1408)
+          (_etype1413, _size1410) = iprot.readListBegin()
+          for _i1414 in xrange(_size1410):
+            _elem1415 = Partition()
+            _elem1415.read(iprot)
+            self.success.append(_elem1415)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
@@ -30731,8 +30965,8 @@ def write(self, oprot):
    if self.success is not
None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1416 in self.success: - iter1416.write(oprot) + for iter1423 in self.success: + iter1423.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31357,10 +31591,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1420, _size1417) = iprot.readListBegin() - for _i1421 in xrange(_size1417): - _elem1422 = iprot.readString() - self.names.append(_elem1422) + (_etype1427, _size1424) = iprot.readListBegin() + for _i1428 in xrange(_size1424): + _elem1429 = iprot.readString() + self.names.append(_elem1429) iprot.readListEnd() else: iprot.skip(ftype) @@ -31385,8 +31619,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1423 in self.names: - oprot.writeString(iter1423) + for iter1430 in self.names: + oprot.writeString(iter1430) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31445,11 +31679,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1427, _size1424) = iprot.readListBegin() - for _i1428 in xrange(_size1424): - _elem1429 = Partition() - _elem1429.read(iprot) - self.success.append(_elem1429) + (_etype1434, _size1431) = iprot.readListBegin() + for _i1435 in xrange(_size1431): + _elem1436 = Partition() + _elem1436.read(iprot) + self.success.append(_elem1436) iprot.readListEnd() else: iprot.skip(ftype) @@ -31478,8 +31712,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1430 in self.success: - iter1430.write(oprot) + for iter1437 in self.success: + iter1437.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31888,11 +32122,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1434, _size1431) = iprot.readListBegin() - for _i1435 in xrange(_size1431): - _elem1436 = Partition() - _elem1436.read(iprot) - self.new_parts.append(_elem1436) + (_etype1441, _size1438) = iprot.readListBegin() + for _i1442 in xrange(_size1438): + _elem1443 = Partition() + _elem1443.read(iprot) + self.new_parts.append(_elem1443) iprot.readListEnd() else: iprot.skip(ftype) @@ -31917,8 +32151,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1437 in self.new_parts: - iter1437.write(oprot) + for iter1444 in self.new_parts: + iter1444.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -32071,11 +32305,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1441, _size1438) = iprot.readListBegin() - for _i1442 in xrange(_size1438): - _elem1443 = Partition() - _elem1443.read(iprot) - self.new_parts.append(_elem1443) + (_etype1448, _size1445) = iprot.readListBegin() + for _i1449 in xrange(_size1445): + _elem1450 = Partition() + _elem1450.read(iprot) + self.new_parts.append(_elem1450) iprot.readListEnd() else: iprot.skip(ftype) @@ -32106,8 +32340,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1444 in self.new_parts: - iter1444.write(oprot) + for iter1451 in 
self.new_parts: + iter1451.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -32610,10 +32844,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1448, _size1445) = iprot.readListBegin() - for _i1449 in xrange(_size1445): - _elem1450 = iprot.readString() - self.part_vals.append(_elem1450) + (_etype1455, _size1452) = iprot.readListBegin() + for _i1456 in xrange(_size1452): + _elem1457 = iprot.readString() + self.part_vals.append(_elem1457) iprot.readListEnd() else: iprot.skip(ftype) @@ -32644,8 +32878,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1451 in self.part_vals: - oprot.writeString(iter1451) + for iter1458 in self.part_vals: + oprot.writeString(iter1458) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -32946,10 +33180,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1455, _size1452) = iprot.readListBegin() - for _i1456 in xrange(_size1452): - _elem1457 = iprot.readString() - self.part_vals.append(_elem1457) + (_etype1462, _size1459) = iprot.readListBegin() + for _i1463 in xrange(_size1459): + _elem1464 = iprot.readString() + self.part_vals.append(_elem1464) iprot.readListEnd() else: iprot.skip(ftype) @@ -32971,8 +33205,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1458 in self.part_vals: - oprot.writeString(iter1458) + for iter1465 in self.part_vals: + oprot.writeString(iter1465) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -33330,10 +33564,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1462, _size1459) = iprot.readListBegin() - for _i1463 in xrange(_size1459): - _elem1464 = iprot.readString() - self.success.append(_elem1464) + (_etype1469, _size1466) = iprot.readListBegin() + for _i1470 in xrange(_size1466): + _elem1471 = iprot.readString() + self.success.append(_elem1471) iprot.readListEnd() else: iprot.skip(ftype) @@ -33356,8 +33590,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1465 in self.success: - oprot.writeString(iter1465) + for iter1472 in self.success: + oprot.writeString(iter1472) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33481,11 +33715,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1467, _vtype1468, _size1466 ) = iprot.readMapBegin() - for _i1470 in xrange(_size1466): - _key1471 = iprot.readString() - _val1472 = iprot.readString() - self.success[_key1471] = _val1472 + (_ktype1474, _vtype1475, _size1473 ) = iprot.readMapBegin() + for _i1477 in xrange(_size1473): + _key1478 = iprot.readString() + _val1479 = iprot.readString() + self.success[_key1478] = _val1479 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33508,9 +33742,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1473,viter1474 in self.success.items(): - oprot.writeString(kiter1473) - oprot.writeString(viter1474) + for kiter1480,viter1481 in self.success.items(): + 
oprot.writeString(kiter1480) + oprot.writeString(viter1481) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33586,11 +33820,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1476, _vtype1477, _size1475 ) = iprot.readMapBegin() - for _i1479 in xrange(_size1475): - _key1480 = iprot.readString() - _val1481 = iprot.readString() - self.part_vals[_key1480] = _val1481 + (_ktype1483, _vtype1484, _size1482 ) = iprot.readMapBegin() + for _i1486 in xrange(_size1482): + _key1487 = iprot.readString() + _val1488 = iprot.readString() + self.part_vals[_key1487] = _val1488 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33620,9 +33854,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1482,viter1483 in self.part_vals.items(): - oprot.writeString(kiter1482) - oprot.writeString(viter1483) + for kiter1489,viter1490 in self.part_vals.items(): + oprot.writeString(kiter1489) + oprot.writeString(viter1490) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -33836,11 +34070,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1485, _vtype1486, _size1484 ) = iprot.readMapBegin() - for _i1488 in xrange(_size1484): - _key1489 = iprot.readString() - _val1490 = iprot.readString() - self.part_vals[_key1489] = _val1490 + (_ktype1492, _vtype1493, _size1491 ) = iprot.readMapBegin() + for _i1495 in xrange(_size1491): + _key1496 = iprot.readString() + _val1497 = iprot.readString() + self.part_vals[_key1496] = _val1497 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33870,9 +34104,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1491,viter1492 in self.part_vals.items(): - oprot.writeString(kiter1491) - oprot.writeString(viter1492) + for kiter1498,viter1499 in self.part_vals.items(): + oprot.writeString(kiter1498) + oprot.writeString(viter1499) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -37924,10 +38158,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1496, _size1493) = iprot.readListBegin() - for _i1497 in xrange(_size1493): - _elem1498 = iprot.readString() - self.success.append(_elem1498) + (_etype1503, _size1500) = iprot.readListBegin() + for _i1504 in xrange(_size1500): + _elem1505 = iprot.readString() + self.success.append(_elem1505) iprot.readListEnd() else: iprot.skip(ftype) @@ -37950,8 +38184,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1499 in self.success: - oprot.writeString(iter1499) + for iter1506 in self.success: + oprot.writeString(iter1506) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38639,10 +38873,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1503, _size1500) = iprot.readListBegin() - for _i1504 in xrange(_size1500): - _elem1505 = iprot.readString() - self.success.append(_elem1505) + (_etype1510, _size1507) = iprot.readListBegin() + for _i1511 in xrange(_size1507): + _elem1512 = iprot.readString() + self.success.append(_elem1512) iprot.readListEnd() else: iprot.skip(ftype) @@ -38665,8 +38899,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1506 in self.success: - oprot.writeString(iter1506) + for iter1513 in self.success: + oprot.writeString(iter1513) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39180,11 +39414,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1510, _size1507) = iprot.readListBegin() - for _i1511 in xrange(_size1507): - _elem1512 = Role() - _elem1512.read(iprot) - self.success.append(_elem1512) + (_etype1517, _size1514) = iprot.readListBegin() + for _i1518 in xrange(_size1514): + _elem1519 = Role() + _elem1519.read(iprot) + self.success.append(_elem1519) iprot.readListEnd() else: iprot.skip(ftype) @@ -39207,8 +39441,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1513 in self.success: - iter1513.write(oprot) + for iter1520 in self.success: + iter1520.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39717,10 +39951,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1517, _size1514) = iprot.readListBegin() - for _i1518 in xrange(_size1514): - _elem1519 = iprot.readString() - self.group_names.append(_elem1519) + (_etype1524, _size1521) = iprot.readListBegin() + for _i1525 in xrange(_size1521): + _elem1526 = iprot.readString() + self.group_names.append(_elem1526) iprot.readListEnd() else: iprot.skip(ftype) @@ -39745,8 +39979,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1520 in self.group_names: - oprot.writeString(iter1520) + for iter1527 in self.group_names: + oprot.writeString(iter1527) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -39973,11 +40207,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1524, _size1521) = iprot.readListBegin() - for _i1525 in xrange(_size1521): - _elem1526 = HiveObjectPrivilege() - _elem1526.read(iprot) - self.success.append(_elem1526) + (_etype1531, _size1528) = iprot.readListBegin() + for _i1532 in xrange(_size1528): + _elem1533 = HiveObjectPrivilege() + _elem1533.read(iprot) + self.success.append(_elem1533) iprot.readListEnd() else: iprot.skip(ftype) @@ -40000,8 +40234,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1527 in self.success: - iter1527.write(oprot) + for iter1534 in self.success: + iter1534.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -40671,10 +40905,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1531, _size1528) = iprot.readListBegin() - for _i1532 in xrange(_size1528): - _elem1533 = iprot.readString() - self.group_names.append(_elem1533) + (_etype1538, _size1535) = iprot.readListBegin() + for _i1539 in xrange(_size1535): + _elem1540 = iprot.readString() + self.group_names.append(_elem1540) iprot.readListEnd() else: iprot.skip(ftype) @@ -40695,8 +40929,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1534 in 
self.group_names: - oprot.writeString(iter1534) + for iter1541 in self.group_names: + oprot.writeString(iter1541) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -40751,10 +40985,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1538, _size1535) = iprot.readListBegin() - for _i1539 in xrange(_size1535): - _elem1540 = iprot.readString() - self.success.append(_elem1540) + (_etype1545, _size1542) = iprot.readListBegin() + for _i1546 in xrange(_size1542): + _elem1547 = iprot.readString() + self.success.append(_elem1547) iprot.readListEnd() else: iprot.skip(ftype) @@ -40777,8 +41011,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1541 in self.success: - oprot.writeString(iter1541) + for iter1548 in self.success: + oprot.writeString(iter1548) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -41710,10 +41944,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1545, _size1542) = iprot.readListBegin() - for _i1546 in xrange(_size1542): - _elem1547 = iprot.readString() - self.success.append(_elem1547) + (_etype1552, _size1549) = iprot.readListBegin() + for _i1553 in xrange(_size1549): + _elem1554 = iprot.readString() + self.success.append(_elem1554) iprot.readListEnd() else: iprot.skip(ftype) @@ -41730,8 +41964,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1548 in self.success: - oprot.writeString(iter1548) + for iter1555 in self.success: + oprot.writeString(iter1555) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -42258,10 +42492,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1552, _size1549) = iprot.readListBegin() - for _i1553 in xrange(_size1549): - _elem1554 = iprot.readString() - self.success.append(_elem1554) + (_etype1559, _size1556) = iprot.readListBegin() + for _i1560 in xrange(_size1556): + _elem1561 = iprot.readString() + self.success.append(_elem1561) iprot.readListEnd() else: iprot.skip(ftype) @@ -42278,8 +42512,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1555 in self.success: - oprot.writeString(iter1555) + for iter1562 in self.success: + oprot.writeString(iter1562) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -45292,10 +45526,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1559, _size1556) = iprot.readListBegin() - for _i1560 in xrange(_size1556): - _elem1561 = iprot.readString() - self.success.append(_elem1561) + (_etype1566, _size1563) = iprot.readListBegin() + for _i1567 in xrange(_size1563): + _elem1568 = iprot.readString() + self.success.append(_elem1568) iprot.readListEnd() else: iprot.skip(ftype) @@ -45312,8 +45546,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1562 in self.success: - oprot.writeString(iter1562) + for iter1569 in self.success: + oprot.writeString(iter1569) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -51623,11 +51857,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: 
self.success = [] - (_etype1566, _size1563) = iprot.readListBegin() - for _i1567 in xrange(_size1563): - _elem1568 = SchemaVersion() - _elem1568.read(iprot) - self.success.append(_elem1568) + (_etype1573, _size1570) = iprot.readListBegin() + for _i1574 in xrange(_size1570): + _elem1575 = SchemaVersion() + _elem1575.read(iprot) + self.success.append(_elem1575) iprot.readListEnd() else: iprot.skip(ftype) @@ -51656,8 +51890,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1569 in self.success: - iter1569.write(oprot) + for iter1576 in self.success: + iter1576.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -53132,11 +53366,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1573, _size1570) = iprot.readListBegin() - for _i1574 in xrange(_size1570): - _elem1575 = RuntimeStat() - _elem1575.read(iprot) - self.success.append(_elem1575) + (_etype1580, _size1577) = iprot.readListBegin() + for _i1581 in xrange(_size1577): + _elem1582 = RuntimeStat() + _elem1582.read(iprot) + self.success.append(_elem1582) iprot.readListEnd() else: iprot.skip(ftype) @@ -53159,8 +53393,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1576 in self.success: - iter1576.write(oprot) + for iter1583 in self.success: + iter1583.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 4f317b3453..298778e968 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9984,6 +9984,7 @@ class PartitionsByExprRequest: - defaultPartitionName - maxParts - catName + - order """ thrift_spec = ( @@ -9994,15 +9995,17 @@ class PartitionsByExprRequest: (4, TType.STRING, 'defaultPartitionName', None, None, ), # 4 (5, TType.I16, 'maxParts', None, -1, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.STRING, 'order', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None,): + def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None, order=None,): self.dbName = dbName self.tblName = tblName self.expr = expr self.defaultPartitionName = defaultPartitionName self.maxParts = maxParts self.catName = catName + self.order = order def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -10043,6 +10046,11 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.order = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -10077,6 +10085,10 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.order is not None: + oprot.writeFieldBegin('order', TType.STRING, 7) + 
oprot.writeString(self.order) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -10098,6 +10110,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.defaultPartitionName) value = (value * 31) ^ hash(self.maxParts) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.order) return value def __repr__(self): diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index e64ae0ead2..ed1de08b99 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2255,6 +2255,7 @@ class PartitionsByExprRequest DEFAULTPARTITIONNAME = 4 MAXPARTS = 5 CATNAME = 6 + ORDER = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2262,7 +2263,8 @@ class PartitionsByExprRequest EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true}, DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true}, MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + ORDER => {:type => ::Thrift::Types::STRING, :name => 'order', :optional => true} } def struct_fields; FIELDS; end diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index da74d4c317..88458de5dd 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1399,6 +1399,23 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names_ps failed: unknown result') end + def get_partition_names_req(req) + send_get_partition_names_req(req) + return recv_get_partition_names_req() + end + + def send_get_partition_names_req(req) + send_message('get_partition_names_req', Get_partition_names_req_args, :req => req) + end + + def recv_get_partition_names_req() + result = receive_message(Get_partition_names_req_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
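# Reviewer note (added commentary, not part of the generated code): get_partition_names_req
# mirrors get_partition_names_ps but takes a single PartitionsByExprRequest, whose new
# optional `order` field (field 7 above) carries the sort specification; as with the other
# client stubs, MISSING_RESULT is raised when the server returns neither a result nor one
# of the declared exceptions.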
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names_req failed: unknown result') + end + def get_partitions_by_filter(db_name, tbl_name, filter, max_parts) send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) return recv_get_partitions_by_filter() @@ -4952,6 +4969,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_partition_names_ps', seqid) end + def process_get_partition_names_req(seqid, iprot, oprot) + args = read_args(iprot, Get_partition_names_req_args) + result = Get_partition_names_req_result.new() + begin + result.success = @handler.get_partition_names_req(args.req) + rescue ::MetaException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_partition_names_req', seqid) + end + def process_get_partitions_by_filter(seqid, iprot, oprot) args = read_args(iprot, Get_partitions_by_filter_args) result = Get_partitions_by_filter_result.new() @@ -9882,6 +9912,42 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_partition_names_req_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::PartitionsByExprRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_partition_names_req_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Get_partitions_by_filter_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 205c867db1..cb065f0899 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2343,6 +2343,27 @@ public boolean tableExists(String catName, String dbName, String tableName) thro isClientFilterEnabled, filterHook, catName, db_name, tbl_name, partNames); } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, short maxParts) + throws MetaException, TException, NoSuchObjectException { + + PartitionsByExprRequest req = new PartitionsByExprRequest( + dbName, tblName, ByteBuffer.wrap(exprBytes)); + if (defaultPartName != null) { + req.setDefaultPartitionName(defaultPartName); + } + if (maxParts >= 0) { + req.setMaxParts(maxParts); + } + if (order != null) { + req.setOrder(order); + } + req.setCatName(catName); + return FilterUtils.filterPartitionNamesIfEnabled(isClientFilterEnabled, filterHook, catName, + dbName, tblName, client.get_partition_names_req(req)); + } + @Override public int getNumPartitionsByFilter(String db_name, String tbl_name, String 
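// A minimal caller-side sketch (illustrative only; the table and variable names are
// hypothetical, not part of this patch). It shows how the client method above might be
// used to fetch partition names sorted by the first partition column in descending order,
// assuming `client` is a connected IMetaStoreClient and `exprBytes` holds a filter
// serialized from an ExprNodeDesc (ObjectStore below treats new byte[] {-1} as an empty
// filter):
//
//   List<String> names = client.listPartitionNames(
//       "hive",                          // catalog name
//       "default",                       // database name
//       "web_logs",                      // table name (hypothetical)
//       "__HIVE_DEFAULT_PARTITION__",    // default partition name
//       exprBytes,                       // serialized filter expression
//       "0:-",                           // order spec: partition column 0, descending
//       (short) 10);                     // return at most 10 names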
filter) throws TException { diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b58b1e4a07..ac92a08292 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -1212,6 +1212,24 @@ PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tabl List part_vals, int max_parts) throws MetaException, TException, NoSuchObjectException; + /** + * Get a list of partition names matching the specified filter, sorted in the specified order if one is given. + * @param dbName database name. + * @param tblName table name. + * @param defaultPartName default partition name. + * @param exprBytes expression, serialized from ExprNodeDesc. + * @param order how to order the resulting list. + * @param maxParts maximum number of partition names to return, or -1 to return all that are + * found. + * @return list of matching partition names. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws NoSuchObjectException no such table. + */ + List listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, short maxParts) + throws MetaException, TException, NoSuchObjectException; + /** * Get a list of partition values * @param request request diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 62f5773f9b..88fe83b6c8 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -957,4 +957,30 @@ public static boolean isView(Table table) { public static List> compilePatternsToPredicates(List patterns) { return patterns.stream().map(pattern -> compile(pattern).asPredicate()).collect(Collectors.toList()); } + + /** + * Parse order specs from their string representation. + * @param order an order specification in the partColIndex[,partColIndex]*:[-|\+]+ pattern + * @return the order specs + */ + public static List makeOrderSpecs(String order) { + if (StringUtils.isBlank(order) || order.split(":").length != 2) { + return new ArrayList(); + } + String[] parts = order.split(":"); + String[] poses = parts[0].split(","); + char[] chars = parts[1].toCharArray(); + List orderSpecs = new ArrayList(chars.length); + if (poses.length != chars.length) { + throw new IllegalArgumentException("The lengths of the partition keys and sort order" + + " do not match, order: " + order); + } + for (int i = 0; i < poses.length; i++) { + Object[] spec = new Object[2]; + spec[0] = Integer.parseInt(poses[i]); + spec[1] = ('+' == chars[i]) ?
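// Worked example for this method (illustrative, not part of the patch):
// makeOrderSpecs("1,0:-+") yields [[1, "DESC"], [0, "ASC"]], i.e. sort by partition
// column 1 descending, then by partition column 0 ascending. A blank order string, or
// one without exactly one ':', yields an empty list; mismatched lengths on the two
// sides of the ':' raise an IllegalArgumentException.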
"ASC" : "DESC"; + orderSpecs.add(spec); + } + return orderSpecs; + } } diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 1e3d6e9b8b..942d2cd62f 100644 --- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -764,8 +764,9 @@ struct PartitionsByExprRequest { 2: required string tblName, 3: required binary expr, 4: optional string defaultPartitionName, - 5: optional i16 maxParts=-1 - 6: optional string catName + 5: optional i16 maxParts=-1, + 6: optional string catName, + 7: optional string order } struct TableStatsResult { @@ -2239,6 +2240,9 @@ service ThriftHiveMetastore extends fb303.FacebookService 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) + list get_partition_names_req(1:PartitionsByExprRequest req) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + // get the partitions matching the given partition filter list get_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter, 4:i16 max_parts=-1) diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 32494ae257..6c262ffcb7 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -6221,6 +6221,31 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n return ret; } + @Override + public List get_partition_names_req(PartitionsByExprRequest req) + throws MetaException, NoSuchObjectException, TException { + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + String dbName = req.getDbName(), tblName = req.getTblName(); + startTableFunction("get_partition_names_req", catName, + dbName, tblName); + fireReadTablePreEvent(catName, dbName, tblName); + List ret = null; + Exception ex = null; + try { + authorizeTableForPartitionMetadata(catName, dbName, tblName); + ret = getMS().listPartitionNames(catName, dbName, tblName, + req.getDefaultPartitionName(), req.getExpr(), req.getOrder(), req.getMaxParts()); + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, dbName, tblName, ret); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partition_names_req", ret != null, ex, tblName); + } + return ret; + } + @Override public List partition_name_to_vals(String part_name) throws TException { if (part_name.length() == 0) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index d1558876f1..fafde011d5 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -496,6 +496,135 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ queryParams, pms.toArray(), queryText); } + public List getPartitionNamesViaSql(SqlFilterForPushdown filter, + String defaultPartName, String order, Integer max) throws MetaException { + String catName = filter.table.isSetCatName() ? filter.table.getCatName() : + DEFAULT_CATALOG_NAME; + if (filter.joins.isEmpty()) { + int psize = filter.table.getPartitionKeysSize(); + for (int i = 0; i < psize; i++) { + filter.joins.add(null); + } + } + List orderSpecs = MetaStoreUtils.makeOrderSpecs(order); + boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType); + String defaultPartitionName = (defaultPartName == null || + defaultPartName.isEmpty()) ? 
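// For orientation (an illustrative sketch, not text emitted verbatim by this code): for a
// table partitioned by (ds string, hr int) and order "1:-", the query assembled below has
// roughly this shape, with the default partition name, table/db/catalog names, and any
// pushed-down filter bound as parameters:
//
//   select "PARTITIONS"."PART_NAME",
//       (case when "ORDER1"."PART_KEY_VAL" <> ?
//             then CAST("ORDER1"."PART_KEY_VAL" AS decimal(21,0)) else null end) as "ODR0"
//   from "PARTITIONS"
//     inner join "TBLS" on "PARTITIONS"."TBL_ID" = "TBLS"."TBL_ID" and "TBLS"."TBL_NAME" = ?
//     inner join "DBS" on "TBLS"."DB_ID" = "DBS"."DB_ID" and "DBS"."NAME" = ?
//     inner join "PARTITION_KEY_VALS" "ORDER1" on "ORDER1"."PART_ID" = "PARTITIONS"."PART_ID"
//         and "ORDER1"."INTEGER_IDX" = 1
//   where "DBS"."CTLG_NAME" = ?
//   order by "ODR0" DESC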
this.defaultPartName : defaultPartName; + + return getPartitionNamesViaSqlInternal(catName, filter, + defaultPartitionName, orderSpecs, max, dbHasJoinCastBug); + } + + private List getPartitionNamesViaSqlInternal(String catName, SqlFilterForPushdown filter, + String defaultPartName, List orderSpecs, Integer max, boolean dbHasJoinCastBug) + throws MetaException { + boolean doTrace = LOG.isDebugEnabled(); + String dbName = filter.table.getDbName(), tblName = filter.table.getTableName(), + sqlFilter = filter.filter; + List paramsForFilter = filter.params; + List joins = filter.joins; + StringBuilder orderColumns = new StringBuilder(), orderClause = new StringBuilder(); + int i = 0; + List paramsForOrder = new ArrayList(); + for (Object[] orderSpec: orderSpecs) { + int partColIndex = (int)orderSpec[0]; + String orderAlias = "ODR" + (i++); + String tableValue, tableAlias; + if (joins.get(partColIndex) == null) { + tableAlias = "ORDER" + partColIndex; + joins.set(partColIndex, "inner join " + PARTITION_KEY_VALS + " \"" + tableAlias + + "\" on \"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + + " and \"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex); + tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" "; + } else { + tableAlias = "FILTER" + partColIndex; + tableValue = " \"" + tableAlias + "\".\"PART_KEY_VAL\" "; + } + + String tableColumn = tableValue; + String colType = filter.table.getPartitionKeys().get(partColIndex).getType(); + PartitionFilterGenerator.FilterType type = + PartitionFilterGenerator.FilterType.fromType(colType); + if (type == PartitionFilterGenerator.FilterType.Date) { + if (dbType == DatabaseProduct.ORACLE) { + tableValue = "TO_DATE(" + tableValue + ", 'YYYY-MM-DD')"; + } else { + tableValue = "cast(" + tableValue + " as date)"; + } + } else if (type == PartitionFilterGenerator.FilterType.Integral) { + tableValue = "CAST(" + tableColumn + " AS decimal(21,0))"; + } + String tableValue0 = tableValue; + tableValue = " (case when " + tableColumn + " <> ?"; + paramsForOrder.add(defaultPartName); + if (dbHasJoinCastBug) { + tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + + DBS + ".\"CTLG_NAME\" = ? and " + + "\"" + tableAlias + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and " + + "\"" + tableAlias + "\".\"INTEGER_IDX\" = " + partColIndex); + paramsForOrder.add(tblName.toLowerCase()); + paramsForOrder.add(dbName.toLowerCase()); + paramsForOrder.add(catName.toLowerCase()); + } + tableValue += " then " + tableValue0 + " else null end) AS \"" + orderAlias + "\" "; + orderColumns.append(tableValue).append(","); + orderClause.append(" \"").append(orderAlias).append("\" ") + .append((String)orderSpec[1]).append(","); + } + + for (int j = 0; j < joins.size(); j++) { + if (joins.get(j) == null) { + joins.remove(j--); + } + } + if (orderClause.length() > 0) { + orderClause.setLength(orderClause.length() - 1); + orderColumns.setLength(orderColumns.length() - 1); + } + + String orderCls = " order by " + + (orderClause.length() > 0 ? orderClause.toString() : "\"PART_NAME\" asc"); + String columns = orderColumns.length() > 0 ? (", " + orderColumns.toString()) : ""; + String queryText = + "select " + PARTITIONS + ".\"PART_NAME\"" + columns + " from " + PARTITIONS + " " + + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" " + + " and " + TBLS + ".\"TBL_NAME\" = ? " + + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + + " and " + DBS + ".\"NAME\" = ? 
" + + join(joins, ' ') + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderCls; + + Object[] params = new Object[paramsForFilter.size() + paramsForOrder.size() + 3]; + i = 0; + for (; i < paramsForOrder.size(); i++) { + params[i] = paramsForOrder.get(i); + } + params[i] = tblName; + params[i+1] = dbName; + params[i+2] = catName; + for (int j = 0; j < paramsForFilter.size(); j++) { + params[i + j + 3] = paramsForFilter.get(j); + } + + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + List partNames = new LinkedList(); + int limit = (max == null ? -1 : max); + try { + long start = doTrace ? System.nanoTime() : 0; + List sqlResult = executeWithArray(query, params, queryText, limit); + long queryTime = doTrace ? System.nanoTime() : 0; + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, queryTime); + for (Object result : sqlResult) { + Object obj = !columns.isEmpty() ? ((Object[]) result)[0] : result; + partNames.add((String)obj); + } + } finally { + query.closeAll(); + } + return partNames; + } + /** * Gets partitions by using direct SQL queries. * @param catName Metastore catalog name. @@ -648,6 +777,15 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ private final List joins = new ArrayList<>(); private String filter; private Table table; + // whether compact nulls in joins when generating sql filter. + private boolean compactJoins; + SqlFilterForPushdown() { + this.compactJoins = true; + } + SqlFilterForPushdown(Table table, boolean compactJoins) { + this.table = table; + this.compactJoins = compactJoins; + } } public boolean generateSqlFilterForPushdown( @@ -662,7 +800,7 @@ public boolean generateSqlFilterForPushdown(Table table, ExpressionTree tree, St result.table = table; result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params, result.joins, dbHasJoinCastBug, ((defaultPartitionName == null) ? defaultPartName : defaultPartitionName), - dbType, schema); + dbType, schema, result.compactJoins); return result.filter != null; } @@ -1031,7 +1169,7 @@ private PartitionFilterGenerator(Table table, List params, List */ private static String generateSqlFilter(Table table, ExpressionTree tree, List params, List joins, boolean dbHasJoinCastBug, String defaultPartName, - DatabaseProduct dbType, String schema) throws MetaException { + DatabaseProduct dbType, String schema, boolean compactJoins) throws MetaException { assert table != null; if (tree == null) { // consistent with other APIs like makeExpressionTree, null is returned to indicate that @@ -1050,9 +1188,13 @@ private static String generateSqlFilter(Table table, ExpressionTree tree, List listPartitionNames(final String catName, final String dbName, final String tblName, + final String defaultPartName, final byte[] exprBytes, + final String order, final short maxParts) throws MetaException, NoSuchObjectException { + final String defaultPartitionName = getDefaultPartitionName(defaultPartName); + final boolean isEmptyFilter = exprBytes.length == 1 && exprBytes[0] == -1; + ExpressionTree tmp = null; + if (!isEmptyFilter) { + tmp = PartFilterExprUtil.makeExpressionTree(expressionProxy, exprBytes, + getDefaultPartitionName(defaultPartName)); + } + final ExpressionTree exprTree = tmp; + return new GetListHelper(catName, dbName, tblName, true, true) { + private List getPartNamesPrunedByExpr(Table table, boolean allowJdo) throws MetaException { + int max = isEmptyFilter ? 
maxParts : -1; + List result; + if (allowJdo) { + result = getPartitionNamesViaOrm(table, ExpressionTree.EMPTY_TREE, order, max, true); + } else { + SqlFilterForPushdown filter = new SqlFilterForPushdown(table, false); + result = directSql.getPartitionNamesViaSql(filter, defaultPartitionName, order, max); + } + if (!isEmptyFilter) { + expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), exprBytes, defaultPartitionName, result); + if (maxParts >= 0 && result.size() > maxParts) { + result = result.subList(0, maxParts); + } + } + return result; + } + @Override + protected List getSqlResult(GetHelper> ctx) throws MetaException { + SqlFilterForPushdown filter = new SqlFilterForPushdown(ctx.getTable(), false); + List partNames = null; + if (exprTree != null) { + if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, defaultPartitionName, filter)) { + partNames = directSql.getPartitionNamesViaSql(filter, defaultPartitionName, order, (int)maxParts); + } + } + if (partNames == null) { + partNames = getPartNamesPrunedByExpr(ctx.getTable(), false); + } + return partNames; + } + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, NoSuchObjectException { + List result = null; + if (exprTree != null) { + try { + result = getPartitionNamesViaOrm(ctx.getTable(), exprTree, order, (int)maxParts, true); + } catch (MetaException e) { + result = null; + } + } + if (result == null) { + result = getPartNamesPrunedByExpr(ctx.getTable(), true); + } + return result; + } + }.run(true); + } + + private List getPartitionNamesViaOrm(Table table, ExpressionTree tree, String order, + Integer maxParts, boolean isValidatedFilter) throws MetaException { + Map params = new HashMap(); + String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, + params, isValidatedFilter); + if (jdoFilter == null) { + assert !isValidatedFilter; + return null; + } + + Query query = pm.newQuery( + "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition" + ); + query.setFilter(jdoFilter); + List orderSpecs = MetaStoreUtils.makeOrderSpecs(order); + StringBuilder builder = new StringBuilder(); + for (Object[] spec : orderSpecs) { + // TODO: order by casted value if the type of partition key is not string + builder.append("values.get(").append(spec[0]).append(") ").append(spec[1]).append(","); + } + if (builder.length() > 0) { + builder.setLength(builder.length() - 1); + query.setOrdering(builder.toString()); + } else { + query.setOrdering("partitionName ascending"); + } + + if (maxParts > -1) { + query.setRange(0, maxParts); + } + + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + Collection jdoRes = (Collection)query.executeWithMap(params); + List result = new LinkedList(); + for (Object partName : jdoRes) { + result.add((String)partName); + } + query.closeAll(); + return result; + } + private String extractPartitionKey(FieldSchema key, List pkeys) { StringBuilder buffer = new StringBuilder(256); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c334421adf..8b66a46b6f 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -482,6 +482,22 @@ void 
updateCreationMetadata(String catName, String dbname, String tablename, Cre List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException; + /** + * Get a partial or complete list of names for partitions of a table. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param defaultPartName default partition name. + * @param exprBytes expression for filtering the resulting list, serialized from ExprNodeDesc. + * @param order how to order the resulting list. + * @param maxParts maximum number of partitions to retrieve, -1 for all. + * @return list of partition names. + * @throws MetaException there was an error accessing the RDBMS. + */ + List listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, + short maxParts) throws MetaException, NoSuchObjectException; + /** * Get a list of partition values as one big struct. * @param catName catalog name. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index f31cc5d7a1..8d28c1fd5a 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1518,6 +1518,12 @@ private void validateTableType(Table tbl) { return partitionNames; } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, String defaultPartName, + byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f7032b93d1..e31dc064c1 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, String defaultPartName, + byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException { + return objectStore.listPartitionNames(catName, dbName, tblName, + defaultPartName, exprBytes, order, maxParts); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index
bea7e9572b..89acdcc55b 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -362,6 +362,13 @@ public void updateCreationMetadata(String catName, String dbname, String tablena return Collections.emptyList(); } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, String defaultPartName, + byte[] exprBytes, String order, short maxParts) throws MetaException, NoSuchObjectException { + + return Collections.emptyList(); + } + @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index fc071f9a20..3534539afb 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -1416,6 +1416,14 @@ public Partition getPartition(String db_name, String tbl_name, return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } + @Override + public List listPartitionNames(String catName, String dbName, String tblName, + String defaultPartName, byte[] exprBytes, String order, + short maxParts) throws MetaException, TException, NoSuchObjectException { + + throw new UnsupportedOperationException(); + } + @Override public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) throws MetaException, TException, NoSuchObjectException {