diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index 5da06cb328..9197048846 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -453,7 +453,8 @@ struct PartitionsByExprRequest { 2: required string tblName, 3: required binary expr, 4: optional string defaultPartitionName, - 5: optional i16 maxParts=-1 + 5: optional i16 maxParts=-1, + 6: optional string order } struct TableStatsResult { @@ -1013,6 +1014,9 @@ service ThriftHiveMetastore extends fb303.FacebookService 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) + list get_partition_names_req(1:PartitionsByExprRequest req) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + // get the partitions matching the given partition filter list get_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter, 4:i16 max_parts=-1) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index ad26c438f0..c9d4f9f03f 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -3016,6 +3016,32 @@ public Partition get_partition_with_auth(final String db_name, } + @Override + public List get_partition_names_req(PartitionsByExprRequest req) + throws MetaException, NoSuchObjectException, TException { + String db_name = req.getDbName(), tbl_name = req.getTblName(); + startTableFunction("get_partition_names_req", db_name, tbl_name); + fireReadTablePreEvent(db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().listPartitionNamesByFilter(db_name, tbl_name, req.getDefaultPartitionName(), + req.getExpr(), req.getOrder(), req.getMaxParts()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_partition_names_req", ret != null, ex, tbl_name); + } + return ret; + } + @Override public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 73d1ddbffa..dc336611d0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1329,6 +1329,25 @@ public boolean tableExists(String tableName) throws MetaException, client.get_partition_names(dbName, tblName, max)); } + @Override + public List listPartitionNames(String dbName, String tblName, + String defaultPartitionName, byte[] expr, String order, short max_parts) + throws MetaException, TException { + PartitionsByExprRequest req = new PartitionsByExprRequest( + dbName, tblName, ByteBuffer.wrap(expr)); + if (defaultPartitionName != null) { + req.setDefaultPartitionName(defaultPartitionName); + } + if (max_parts >= 0) { + req.setMaxParts(max_parts); + } + if (order != null) { + req.setOrder(order); + } + return filterHook.filterPartitionNames(dbName, tblName, + client.get_partition_names_req(req)); + } + @Override public List listPartitionNames(String db_name, String tbl_name, List 
part_vals, short max_parts) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 6d3b4a5b87..eb3e1702f0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -521,6 +521,9 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in List listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException, TException; + List listPartitionNames(String db_name, String tbl_name, + String defaultPartitionName, byte[] filter, String order, short max_parts) throws MetaException, TException; + List listPartitionNames(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 220893d8b8..297d6b082f 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -29,6 +29,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -42,6 +43,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats; @@ -350,6 +352,106 @@ public Database getDatabase(String dbName) throws MetaException{ partNames, new ArrayList(), null); } + /** + * Gets partition names by using direct SQL queries. + * @param table The table. + * @param tree The expression tree from which the SQL filter will be derived. + * @param order Optional ordering of the result, encoded as partition-key positions and '+'/'-' directions. + * @param max The maximum number of partition names to return. + * @return List of partition names. Null if SQL filter cannot be derived. + */ + public List getPartitionNamesViaSqlFilter( + Table table, ExpressionTree tree, String order, Integer max) throws MetaException { + List params = new ArrayList(); + List joins = new ArrayList(); + // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround. + boolean dbHasJoinCastBug = (dbType == DB.DERBY || dbType == DB.ORACLE); + String sqlFilter = PartitionFilterGenerator.generateSqlFilter( + table, tree, params, joins, dbHasJoinCastBug, dbType, true); + if (sqlFilter == null) { + return null; // Cannot make SQL filter to push down.
+ } + return getPartitionNamesViaSqlFilterInternal(table, + sqlFilter, params, joins, order, max); + } + + public List getPartitionNamesViaSqlFilterInternal(Table table, + String sqlFilter, List paramsForFilter, + List joins, String order, Integer max) throws MetaException { + boolean doTrace = LOG.isDebugEnabled(); + LinkedHashMap> orderMapping = + MetaStoreUtils.makeOrder(table, order); + StringBuilder orderExpr = new StringBuilder(); + StringBuilder orderFields = new StringBuilder(); + int index = 0; + for (Map.Entry> entry : orderMapping.entrySet()) { + int partColIndex = entry.getKey(); + String orderField = "ORDE" + (index++); + String selectExpr; + if (joins.get(partColIndex) == null) { + joins.set(partColIndex, "inner join \"PARTITION_KEY_VALS\" \"ORDER" + partColIndex + + "\" on \"ORDER" + partColIndex + "\".\"PART_ID\" = \"PARTITIONS\".\"PART_ID\"" + + " and \"ORDER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex); + selectExpr = " \"ORDER" + partColIndex + "\".\"PART_KEY_VAL\" "; + } else { + selectExpr = " \"FILTER" + partColIndex + "\".\"PART_KEY_VAL\" "; + } + String partType = entry.getValue().getSecond(); + if (partType.equals(serdeConstants.DATE_TYPE_NAME)) { + selectExpr = "CAST(" + selectExpr + " AS date)"; + } else if (serdeConstants.IntegralTypes.contains(partType)) { + selectExpr = "CAST(" + selectExpr + " AS decimal(21,0))"; + } + selectExpr += " AS \"" + orderField + "\""; + orderFields.append(selectExpr).append(","); + orderExpr.append("\"").append(orderField).append("\" ").append(entry.getValue().getFirst()).append(","); + } + for (int i = 0; i < joins.size(); i++) { + if (joins.get(i) == null) { + joins.remove(i--); + } + } + if (orderExpr.length() > 0) { + orderExpr.setLength(orderExpr.length() - 1); + orderFields.setLength(orderFields.length() - 1); + } + + String orderForFilter = " order by " + + (orderExpr.length() > 0 ? orderExpr.toString() : "\"PART_NAME\" asc"); + String orderfs = orderFields.length() > 0 ? ", " + orderFields.toString() : ""; + String queryText = + "select \"PARTITIONS\".\"PART_NAME\"" + orderfs + " from \"PARTITIONS\"" + + " inner join \"TBLS\" on \"PARTITIONS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\" " + + " and \"TBLS\".\"TBL_NAME\" = ? " + + " inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " + + " and \"DBS\".\"NAME\" = ? " + + join(joins, ' ') + + (StringUtils.isBlank(sqlFilter) ? "" : (" where " + sqlFilter)) + orderForFilter; + + Object[] params = new Object[paramsForFilter.size() + 2]; + params[0] = table.getTableName(); + params[1] = table.getDbName(); + for (int i = 0; i < paramsForFilter.size(); ++i) { + params[i + 2] = paramsForFilter.get(i); + } + + long start = doTrace ? System.nanoTime() : 0; + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + if (max != null) { + query.setRange(0, max.shortValue()); + } + List sqlResult = executeWithArray(query, params, queryText); + long queryTime = doTrace ? System.nanoTime() : 0; + List partNames = new ArrayList<>(sqlResult.size()); + for (Object result : sqlResult) { + String partName = !orderfs.isEmpty() ? + String.valueOf(((Object[])result)[0]) : String.valueOf(result); + partNames.add(partName); + } + timingTrace(doTrace, queryText, start, queryTime); + query.closeAll(); + return partNames; + } + /** * Gets partitions by using direct SQL queries. * @param table The table. @@ -365,7 +467,7 @@ public Database getDatabase(String dbName) throws MetaException{ // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround. 
boolean dbHasJoinCastBug = (dbType == DB.DERBY || dbType == DB.ORACLE); String sqlFilter = PartitionFilterGenerator.generateSqlFilter( - table, tree, params, joins, dbHasJoinCastBug, dbType); + table, tree, params, joins, dbHasJoinCastBug, dbType, false); if (sqlFilter == null) { return null; // Cannot make SQL filter to push down. } @@ -931,7 +1033,7 @@ private PartitionFilterGenerator(Table table, List params, List * @return the string representation of the expression tree */ private static String generateSqlFilter(Table table, ExpressionTree tree, List params, - List joins, boolean dbHasJoinCastBug, DB dbType) throws MetaException { + List joins, boolean dbHasJoinCastBug, DB dbType, boolean fetchPartNamesOnly) throws MetaException { assert table != null; if (tree.getRoot() == null) { return ""; } @@ -944,10 +1046,12 @@ private static String generateSqlFilter(Table table, ExpressionTree tree, List> makeOrder(Table tab, String order) { + LinkedHashMap> ordering = + new LinkedHashMap>(); + String[] parts; + if (order != null && (parts = order.split(":")).length == 2) { + String[] poses = parts[0].split(","); + if (poses.length != parts[1].length()) { + throw new IllegalArgumentException("Fields and orders do not match"); + } + for (int i = 0; i < poses.length; i++) { + int pos = Integer.valueOf(poses[i]); + String or = ('+' == parts[1].charAt(i)) ? "ASC" : "DESC"; + FieldSchema partitionKey = tab.getPartitionKeys().get(pos); + ordering.put(pos, ObjectPair.create(or, partitionKey.getType())); + } + } + return ordering; + } + } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 8c4a58f121..c56bce30b8 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2256,6 +2256,11 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result) throws MetaException { result.addAll(getPartitionNamesNoTxn( table.getDbName(), table.getTableName(), maxParts)); + return prunePartitionsByExpr(table, expr, defaultPartName, result); + } + + private boolean prunePartitionsByExpr(Table table, byte[] expr, String defaultPartName, List result) + throws MetaException { List columnNames = new ArrayList(); List typeInfos = new ArrayList(); for (FieldSchema fs : table.getPartitionKeys()) { @@ -2761,54 +2766,58 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public List listPartitionNamesByFilter(String dbName, String tableName, String filter, - short maxParts) throws MetaException { - boolean success = false; - Query query = null; - List partNames = new ArrayList(); - try { - openTransaction(); - LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - MTable mtable = getMTable(dbName, tableName); - if (mtable == null) { - // To be consistent with the behavior of listPartitionNames, if the - // table or db does not exist, we return an empty list - return partNames; - } - Map params = new HashMap(); - String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); - query = - pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where " + queryFilterString); - if (maxParts >= 0) { - // User specified a row limit, set it on the Query -
query.setRange(0, maxParts); - } - LOG.debug("Filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString); - LOG.debug("Parms is " + params); - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - query.setResult("partitionName"); - Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - partNames.add((String) i.next()); - } - LOG.debug("Done executing query for listMPartitionNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); - } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } - } - return partNames; + public List listPartitionNamesByFilter(String dbName, final String tableName, + final String defaultPartitionName, final byte[] expr, final String order, final short maxParts) + throws MetaException, NoSuchObjectException { + + return new GetListHelper(dbName, tableName, true, false) { + private List returnListWithNulls(int size) { + List l = new ArrayList(); + for (int i = 0; i < size; i++) { + l.add(null); + } + return l; + } + private List getPartitionNamesByExprNoTxn(Table table) throws MetaException { + List joins = returnListWithNulls(table.getPartitionKeysSize()); + List result = directSql.getPartitionNamesViaSqlFilterInternal(table, + null, new ArrayList(), joins, order, -1); + prunePartitionsByExpr(table, expr, defaultPartitionName, result); + if (maxParts >=0 && result.size() > maxParts) { + result = result.subList(0, maxParts); + } + return result; + } + @Override + protected List getSqlResult(GetHelper> ctx) throws MetaException { + if (expr.length == 1 && expr[0] == -1) { + List joins = returnListWithNulls(ctx.getTable().getPartitionKeysSize()); + return directSql.getPartitionNamesViaSqlFilterInternal(ctx.getTable(), + null, new ArrayList(), joins, order, (int)maxParts); + } + String filter = null; + try { + filter = expressionProxy.convertExprToFilter(expr); + } catch (MetaException ex) { + throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); + } + + final ExpressionTree exprTree = makeExpressionTree(filter); + List result = null; + if (exprTree != null) { + result = directSql.getPartitionNamesViaSqlFilter(ctx.getTable(), exprTree, order, (int)maxParts); + } + if (result == null) { + result = getPartitionNamesByExprNoTxn(ctx.getTable()); + } + return result; + } + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, NoSuchObjectException { + return Collections.emptyList(); + } + }.run(true); } @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 7c85eea213..6e15730f6e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -192,7 +192,8 @@ public abstract void alterTable(String dbname, String name, Table newTable) String tbl_name, short max_parts) throws MetaException; public abstract List listPartitionNamesByFilter(String db_name, - String tbl_name, String filter, short max_parts) throws MetaException; + String tbl_name, String defaultPartitionName, byte[] filter, String order, short max_parts) + throws MetaException, NoSuchObjectException; public abstract void 
alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f184c568b5..c598e677d1 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -251,9 +251,10 @@ public void alterTable(String dbName, String name, Table newTable) } @Override - public List listPartitionNamesByFilter(String dbName, String tblName, - String filter, short maxParts) throws MetaException { - return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts); + public List listPartitionNamesByFilter(String db_name, String tbl_name, String defaultPartitionName, + byte[] filter, String order, short max_parts) throws MetaException, NoSuchObjectException { + return objectStore.listPartitionNamesByFilter(db_name, tbl_name, + defaultPartitionName, filter, order, max_parts); } @Override diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 83fb4bb900..2b0f02cba1 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -255,8 +255,8 @@ public void alterTable(String dbname, String name, Table newTable) throws Invali } @Override - public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, - short max_parts) throws MetaException { + public List listPartitionNamesByFilter(String db_name, String tbl_name, String defaultPartitionName, + byte[] filter, String order, short max_parts) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 7d972e5448..05cdf59227 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -51,6 +51,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -152,6 +153,11 @@ import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.GrantDesc; import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; @@ -1919,11 +1925,39 @@ private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveExc if (!tbl.isPartitioned()) { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tabName); } - if (showParts.getPartSpec() != null) { + if (showParts.getFilterExpr() != 
null || showParts.getOrderExpr() != null) { + ExprNodeDesc pred = showParts.getFilterExpr(); + if (showParts.getPartSpec() != null) { + List fieldSchemas = tbl.getPartitionKeys(); + Map infoMap = new LinkedHashMap<>(); + for (FieldSchema part_col : fieldSchemas) { + ColumnInfo info = new ColumnInfo(part_col.getName(), + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true); + infoMap.put(part_col.getName(), info); + } + for (Map.Entry entry : showParts.getPartSpec().entrySet()) { + ColumnInfo part_col = infoMap.get(entry.getKey()); + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + Object val = entry.getValue(); + if (!part_col.getType().equals(stringTypeInfo)) { + Converter converter = ObjectInspectorConverters.getConverter( + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(stringTypeInfo), + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(part_col.getType())); + val = converter.convert(val); + } + + ExprNodeDesc exprNodeDesc = + ExprNodeGenericFuncDesc.newInstance(FunctionRegistry.getFunctionInfo("=").getGenericUDF(), + Lists.newArrayList(new ExprNodeColumnDesc(part_col), new ExprNodeConstantDesc(part_col.getType(), val))); + pred = (pred == null) ? exprNodeDesc : ExprNodeDescUtils.mergePredicates(exprNodeDesc, pred); + } + } + parts = db.getPartitionNames(tbl, (ExprNodeGenericFuncDesc) pred, showParts.getOrderExpr(), showParts.getLimit()); + } else if (showParts.getPartSpec() != null) { parts = db.getPartitionNames(tbl.getDbName(), - tbl.getTableName(), showParts.getPartSpec(), (short) -1); + tbl.getTableName(), showParts.getPartSpec(), showParts.getLimit()); } else { - parts = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1); + parts = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), showParts.getLimit()); } // write the results in the file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 48b781de43..cdd517b497 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2068,6 +2068,24 @@ public boolean dropPartition(String dbName, String tableName, List partV return names; } + public List getPartitionNames(Table tbl, + ExprNodeGenericFuncDesc expr, String order, short max_parts) throws HiveException { + List names = null; + byte[] exprBytes = {(byte)-1}; + if (expr != null) { + exprBytes = Utilities.serializeExpressionToKryo(expr); + } + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + try { + names = getMSC().listPartitionNames(tbl.getDbName(), + tbl.getTableName(), defaultPartitionName, exprBytes, order, max_parts); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + return names; + } + /** * get all the partitions that the table has * diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index c8362688b3..a18968a6de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; import 
org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -2127,7 +2128,8 @@ private void analyzeDescDatabase(ASTNode ast) throws SemanticException { private void analyzeShowPartitions(ASTNode ast) throws SemanticException { ShowPartitionsDesc showPartsDesc; String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - List> partSpecs = getPartitionSpecs(getTable(tableName), ast); + Table table = getTable(tableName); + List> partSpecs = getPartitionSpecs(table, ast); // We only can have a single partition spec assert (partSpecs.size() <= 1); Map partSpec = null; @@ -2138,12 +2140,82 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { validateTable(tableName, null); showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); - inputs.add(new ReadEntity(getTable(tableName))); + analyzeShowPartitionsConstraints(ast, table, showPartsDesc); + inputs.add(new ReadEntity(table)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showPartsDesc), conf)); setFetchTask(createFetchTask(showPartsDesc.getSchema())); } + private void analyzeShowPartitionsConstraints(ASTNode ast, Table tab, + ShowPartitionsDesc showPartsDesc) throws SemanticException { + + for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) { + ASTNode astChild = (ASTNode)ast.getChild(childIndex); + if (astChild.getType() == HiveParser.TOK_LIMIT) { + short limit = Short.valueOf(((ASTNode)astChild.getChild(0)).getText()); + showPartsDesc.setLimit(limit); + } else { + RowResolver rwsch = new RowResolver(); + for (FieldSchema part_col : tab.getPartCols()) { + rwsch.put(tab.getTableName(), part_col.getName(), new ColumnInfo(part_col.getName(), + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true)); + } + TypeCheckCtx tcCtx = new TypeCheckCtx(rwsch); + if (astChild.getType() == HiveParser.TOK_WHERE) { + ASTNode conds = (ASTNode) astChild.getChild(0); + Map nodeOutputs = TypeCheckProcFactory.genExprNode(conds, tcCtx); + ExprNodeDesc desc = nodeOutputs.get(conds); + if (!(desc instanceof ExprNodeGenericFuncDesc) + || !((ExprNodeGenericFuncDesc) desc).getTypeInfo().equals( + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME))) { + throw new SemanticException("Not a filter expr: " + (desc == null ? 
"null" : desc.getExprString())); + } + showPartsDesc.setFilterExpr(desc); + } + + if (astChild.getType() == HiveParser.TOK_ORDERBY) { + StringBuilder colIndices = new StringBuilder(); + StringBuilder order = new StringBuilder(); + int ccount = astChild.getChildCount(); + for (int i = 0; i < ccount; ++i) { + ASTNode cl = (ASTNode) astChild.getChild(i); + if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { + order.append("+"); + cl = (ASTNode) cl.getChild(0); + } else if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEDESC) { + order.append("-"); + cl = (ASTNode) cl.getChild(0); + } else { + order.append("+"); + } + Map nodeOutputs = TypeCheckProcFactory.genExprNode(cl, tcCtx); + ExprNodeDesc desc = nodeOutputs.get(cl); + if (!(desc instanceof ExprNodeColumnDesc)) { + throw new SemanticException("Only partition keys are allowed for " + + "ordering partitions, input: " + desc.getExprString()); + } + colIndices.append(findPosInPartKeys(((ExprNodeColumnDesc) desc).getColumn(), tab.getPartCols())).append(","); + } + if (colIndices.length() > 0) { + colIndices.setLength(colIndices.length() - 1); + showPartsDesc.setOrderExpr(colIndices.append(":").append(order).toString()); + } + } + } + } + } + + private int findPosInPartKeys(String col, List schemas) { + int i = 0; + for (; i < schemas.size(); i++) { + if (schemas.get(i).getName().equalsIgnoreCase(col)) { + break; + } + } + return i; + } + private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { ShowCreateTableDesc showCreateTblDesc; String tableName = getUnescapedName((ASTNode)ast.getChild(0)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index eda460f069..8e9b6e9570 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -1409,7 +1409,7 @@ showStatement | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)? -> ^(TOK_SHOWCOLUMNS tableName $db_name?) | KW_SHOW KW_FUNCTIONS (KW_LIKE showFunctionIdentifier|showFunctionIdentifier)? -> ^(TOK_SHOWFUNCTIONS KW_LIKE? showFunctionIdentifier?) - | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?) + | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? whereClause? orderByClause? limitClause? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec? whereClause? orderByClause? limitClause?) | KW_SHOW KW_CREATE KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName) | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec? -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?) 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java index adf56ff5b3..4b250feac6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java @@ -34,6 +34,9 @@ private static final long serialVersionUID = 1L; String tabName; String resFile; + short limit = -1; + String orderExpr; + ExprNodeDesc filterExpr; // Filter the partitions to show based on on supplied spec Map partSpec; @@ -116,4 +119,28 @@ public String getResFile() { public void setResFile(String resFile) { this.resFile = resFile; } + + public short getLimit() { + return limit; + } + + public void setLimit(short limit) { + this.limit = limit; + } + + public void setFilterExpr(ExprNodeDesc expr) { + this.filterExpr = expr; + } + + public ExprNodeDesc getFilterExpr() { + return filterExpr; + } + + public void setOrderExpr(String orderExpr) { + this.orderExpr = orderExpr; + } + + public String getOrderExpr() { + return orderExpr; + } } diff --git a/ql/src/test/queries/clientpositive/show_partitions2.q b/ql/src/test/queries/clientpositive/show_partitions2.q new file mode 100644 index 0000000000..259447ef00 --- /dev/null +++ b/ql/src/test/queries/clientpositive/show_partitions2.q @@ -0,0 +1,43 @@ +CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds string, hs int, rs string); + +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=0, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=1, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=2, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=3, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=4, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=5, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=6, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=7, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=8, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=9, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=10, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=11, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=13, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=14, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=17, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=18, rs='EU'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=19, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=20, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=22, rs='AS'); +ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=23, rs='EU'); + +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10'; +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' AND hs > 20; +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC; +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC LIMIT 3; +SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by rs DESC, hs LIMIT 3; +SHOW PARTITIONS mpart1 PARTITION(rs='AS') WHERE ds = '1980-11-10' AND hs > 20; +SHOW PARTITIONS mpart1 WHERE hs > 5 and hs 
< 15 order by hs DESC, ds; + +SHOW PARTITIONS mpart1 ORDER BY hs DESC; +SHOW PARTITIONS mpart1 ORDER BY ds DESC; +SHOW PARTITIONS mpart1 ORDER BY ds ASC, hs DESC; +SHOW PARTITIONS mpart1 PARTITION(rs='AS') ORDER BY ds DESC; +SHOW PARTITIONS mpart1 ORDER BY hs DESC LIMIT 3; + +SHOW PARTITIONS mpart1 limit 3; +SHOW PARTITIONS mpart1 PARTITION(ds='1980-11-10') LIMIT 3; diff --git a/ql/src/test/results/clientpositive/show_partitions2.q.out b/ql/src/test/results/clientpositive/show_partitions2.q.out new file mode 100644 index 0000000000..b6b96acf1c --- /dev/null +++ b/ql/src/test/results/clientpositive/show_partitions2.q.out @@ -0,0 +1,400 @@ +PREHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds string, hs int, rs string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mpart1 +POSTHOOK: query: CREATE TABLE mpart1 (key1 INT, value1 STRING) PARTITIONED BY (ds string, hs int, rs string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mpart1 +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=0, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=0, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=0/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=1, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=1, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=1/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=2, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=2, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=2/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=3, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=3, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=3/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=4, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=4, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=4/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=5, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=5, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=5/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=6, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=6, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: 
default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=6/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=7, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=7, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=7/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=8, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=8, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=8/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=9, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=9, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=9/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=10, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=10, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=10/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=11, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-09', hs=11, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-09/hs=11/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=12, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=12/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=13, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=13, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=13/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=14, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=14, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=14/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=15, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=15/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: 
query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=16, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=16/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=17, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=17, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=17/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=18, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=18, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=18/rs=EU +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=19, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=19, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=19/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=20, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=20, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=20/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=21, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=22, rs='AS') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=22, rs='AS') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=22/rs=AS +PREHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=23, rs='EU') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@mpart1 +POSTHOOK: query: ALTER TABLE mpart1 ADD PARTITION (ds='1980-11-10', hs=23, rs='EU') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@mpart1 +POSTHOOK: Output: default@mpart1@ds=1980-11-10/hs=23/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=18/rs=EU +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=23/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' AND hs > 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE 
ds = '1980-11-10' AND hs > 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=23/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=18/rs=EU +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=12/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by hs DESC LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by rs DESC, hs LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE ds = '1980-11-10' order by rs DESC, hs LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=18/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(rs='AS') WHERE ds = '1980-11-10' AND hs > 20 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(rs='AS') WHERE ds = '1980-11-10' AND hs > 20 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=22/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 WHERE hs > 5 and hs < 15 order by hs DESC, ds +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 WHERE hs > 5 and hs < 15 order by hs DESC, ds +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-09/hs=11/rs=EU +ds=1980-11-09/hs=10/rs=AS +ds=1980-11-09/hs=9/rs=AS +ds=1980-11-09/hs=8/rs=AS +ds=1980-11-09/hs=7/rs=AS +ds=1980-11-09/hs=6/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hs DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hs DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=18/rs=EU +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-09/hs=11/rs=EU +ds=1980-11-09/hs=10/rs=AS +ds=1980-11-09/hs=9/rs=AS +ds=1980-11-09/hs=8/rs=AS +ds=1980-11-09/hs=7/rs=AS +ds=1980-11-09/hs=6/rs=EU +ds=1980-11-09/hs=5/rs=EU +ds=1980-11-09/hs=4/rs=EU +ds=1980-11-09/hs=3/rs=AS +ds=1980-11-09/hs=2/rs=AS +ds=1980-11-09/hs=1/rs=EU +ds=1980-11-09/hs=0/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds DESC 
+POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=18/rs=EU +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-09/hs=11/rs=EU +ds=1980-11-09/hs=10/rs=AS +ds=1980-11-09/hs=9/rs=AS +ds=1980-11-09/hs=8/rs=AS +ds=1980-11-09/hs=7/rs=AS +ds=1980-11-09/hs=6/rs=EU +ds=1980-11-09/hs=5/rs=EU +ds=1980-11-09/hs=4/rs=EU +ds=1980-11-09/hs=3/rs=AS +ds=1980-11-09/hs=2/rs=AS +ds=1980-11-09/hs=1/rs=EU +ds=1980-11-09/hs=0/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds ASC, hs DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY ds ASC, hs DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09/hs=11/rs=EU +ds=1980-11-09/hs=10/rs=AS +ds=1980-11-09/hs=9/rs=AS +ds=1980-11-09/hs=8/rs=AS +ds=1980-11-09/hs=7/rs=AS +ds=1980-11-09/hs=6/rs=EU +ds=1980-11-09/hs=5/rs=EU +ds=1980-11-09/hs=4/rs=EU +ds=1980-11-09/hs=3/rs=AS +ds=1980-11-09/hs=2/rs=AS +ds=1980-11-09/hs=1/rs=EU +ds=1980-11-09/hs=0/rs=AS +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=18/rs=EU +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=15/rs=EU +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=12/rs=EU +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(rs='AS') ORDER BY ds DESC +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(rs='AS') ORDER BY ds DESC +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +ds=1980-11-10/hs=20/rs=AS +ds=1980-11-10/hs=19/rs=AS +ds=1980-11-10/hs=17/rs=AS +ds=1980-11-10/hs=16/rs=AS +ds=1980-11-10/hs=14/rs=AS +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-09/hs=10/rs=AS +ds=1980-11-09/hs=9/rs=AS +ds=1980-11-09/hs=8/rs=AS +ds=1980-11-09/hs=7/rs=AS +ds=1980-11-09/hs=3/rs=AS +ds=1980-11-09/hs=2/rs=AS +ds=1980-11-09/hs=0/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hs DESC LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 ORDER BY hs DESC LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=23/rs=EU +ds=1980-11-10/hs=22/rs=AS +ds=1980-11-10/hs=21/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 limit 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 limit 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-09/hs=0/rs=AS +ds=1980-11-09/hs=1/rs=EU +ds=1980-11-09/hs=10/rs=AS +PREHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds='1980-11-10') LIMIT 3 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@mpart1 +POSTHOOK: query: SHOW PARTITIONS mpart1 PARTITION(ds='1980-11-10') LIMIT 3 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@mpart1 +ds=1980-11-10/hs=12/rs=EU +ds=1980-11-10/hs=13/rs=AS +ds=1980-11-10/hs=14/rs=AS
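
Reviewer note, not part of the patch: the ORDER BY clause accepted by the extended SHOW PARTITIONS grammar is carried to the metastore as a compact string of partition-key positions and '+'/'-' directions ("0,1:+-" for ORDER BY ds ASC, hs DESC on a table partitioned by (ds string, hs int, rs string)), built in DDLSemanticAnalyzer.analyzeShowPartitionsConstraints and decoded again by MetaStoreUtils.makeOrder above. The sketch below replays that decoding with plain strings in place of FieldSchema objects; the class name and the hard-coded types are illustrative only.

import java.util.LinkedHashMap;
import java.util.Map;

// Reviewer sketch: decode the "<positions>:<directions>" order string the same way
// makeOrder does, mapping each partition-key position to (ASC|DESC, column type).
public class OrderStringDemo {
  public static void main(String[] args) {
    String order = "0,1:+-";                              // analyzer output for "ORDER BY ds ASC, hs DESC"
    String[] partKeyTypes = {"string", "int", "string"};  // stand-in for tab.getPartitionKeys()

    Map<Integer, String[]> ordering = new LinkedHashMap<>();
    String[] parts = order.split(":");
    String[] positions = parts[0].split(",");
    for (int i = 0; i < positions.length; i++) {
      int pos = Integer.parseInt(positions[i]);
      String direction = parts[1].charAt(i) == '+' ? "ASC" : "DESC";
      ordering.put(pos, new String[] {direction, partKeyTypes[pos]});
    }
    // Prints "0 -> ASC string" then "1 -> DESC int"
    ordering.forEach((pos, v) -> System.out.println(pos + " -> " + v[0] + " " + v[1]));
  }
}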
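
Reviewer note, not part of the patch: a minimal sketch of how a Java caller would exercise the new IMetaStoreClient.listPartitionNames overload added in this patch, assuming an already-connected client. The byte[]{-1} sentinel for "no filter expression" and the "1:-" order string (second partition key, descending) follow the conventions used by Hive.getPartitionNames and DDLSemanticAnalyzer above; the class name and the hard-coded default-partition value are illustrative.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.thrift.TException;

// Reviewer sketch: the programmatic equivalent of
// "SHOW PARTITIONS mpart1 ORDER BY hs DESC LIMIT 3" from the test above.
public class ShowPartitionsClientDemo {
  public static List<String> topThreeByHourDesc(IMetaStoreClient msc) throws TException {
    byte[] noFilter = new byte[] {(byte) -1};               // sentinel sent when there is no WHERE clause
    String defaultPartName = "__HIVE_DEFAULT_PARTITION__";  // illustrative; normally read from hive.exec.default.partition.name
    return msc.listPartitionNames("default", "mpart1", defaultPartName, noFilter, "1:-", (short) 3);
  }
}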