diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 68c6e44..8be31dd 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -139,6 +139,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if(MetaStoreUtils.isCascadeNeededInAlterTable(oldt, newt)) { List parts = msdb.getPartitions(dbname, name, -1); for (Partition part : parts) { + part = part.deepCopy(); List oldCols = part.getSd().getCols(); part.getSd().setCols(newt.getSd().getCols()); String oldPartName = Warehouse.makePartName(oldt.getPartitionKeys(), part.getValues()); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java index 9762309..a777b14 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java @@ -161,6 +161,12 @@ public FilterPlan or(FilterPlan other) { */ public static class ScanPlan extends FilterPlan { + private String defaultPartitionName; + + public ScanPlan(String defaultPartitionName) { + this.defaultPartitionName = defaultPartitionName; + } + public static class ScanMarker { final String value; /** @@ -219,6 +225,15 @@ public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { // represent Scan start, partition key name -> scanMarkerPair Map markers = new HashMap(); List ops = new ArrayList(); + boolean unknown = false; + + public boolean hasUnknown() { + return unknown; + } + + public void setUnknown(boolean unknown) { + this.unknown = unknown; + } // Get the number of partition key prefixes which can be used in the scan range. 
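// --- Editor's illustrative sketch (not part of the patch) ---------------------
// Why HiveAlterHandler now calls part.deepCopy() before rewriting the partition
// columns in the cascade loop above: the Partition objects returned by the store
// may be shared (e.g. cached), so mutating them in place would leak the new
// schema to every other reader. "Part" below is a stand-in class, not Hive code.
import java.util.ArrayList;
import java.util.List;

public class DefensiveCopySketch {
  static final class Part {
    List<String> cols = new ArrayList<>();
    Part deepCopy() { Part p = new Part(); p.cols = new ArrayList<>(cols); return p; }
  }

  public static void main(String[] args) {
    Part cached = new Part();
    cached.cols.add("old_col");

    Part unsafe = cached;              // no copy: same object as the cache entry
    unsafe.cols.add("new_col");
    System.out.println(cached.cols);   // [old_col, new_col] -- cache entry mutated

    cached.cols.remove("new_col");
    Part safe = cached.deepCopy();     // defensive copy, as part.deepCopy() does
    safe.cols.add("new_col");
    System.out.println(cached.cols);   // [old_col] -- cache entry untouched
  }
}
// -------------------------------------------------------------------------------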
// For example, if partition key is (year, month, state) @@ -230,6 +245,9 @@ public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { // nothing can be used in scan range, majorParts = 0 private int getMajorPartsCount(List parts) { int majorPartsCount = 0; + if (hasUnknown()) { + return majorPartsCount; + } while (majorPartsCount parts) { return majorPartsCount; } public Filter getFilter(List parts) { + if (hasUnknown()) { + return null; + } int majorPartsCount = getMajorPartsCount(parts); Set majorKeys = new HashSet(); for (int i=0;i parts) { } PartitionKeyComparator.Mark endMark = null; if (entry.getValue().endMarker != null) { - startMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, + endMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, entry.getValue().endMarker.isInclusive); } PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( @@ -280,6 +301,9 @@ public Filter getFilter(List parts) { } public void setStartMarker(String keyName, String keyType, String start, boolean isInclusive) { + if (hasUnknown()) { + return; + } if (markers.containsKey(keyName)) { markers.get(keyName).startMarker = new ScanMarker(start, isInclusive, keyType); } else { @@ -289,6 +313,9 @@ public void setStartMarker(String keyName, String keyType, String start, boolean } public ScanMarker getStartMarker(String keyName) { + if (hasUnknown()) { + return null; + } if (markers.containsKey(keyName)) { return markers.get(keyName).startMarker; } else { @@ -297,6 +324,9 @@ public ScanMarker getStartMarker(String keyName) { } public void setEndMarker(String keyName, String keyType, String end, boolean isInclusive) { + if (hasUnknown()) { + return; + } if (markers.containsKey(keyName)) { markers.get(keyName).endMarker = new ScanMarker(end, isInclusive, keyType); } else { @@ -306,6 +336,9 @@ public void setEndMarker(String keyName, String keyType, String end, boolean isI } public ScanMarker getEndMarker(String keyName) { + if (hasUnknown()) { + return null; + } if (markers.containsKey(keyName)) { return markers.get(keyName).endMarker; } else { @@ -324,21 +357,28 @@ public FilterPlan and(FilterPlan other) { private ScanPlan and(ScanPlan other) { // create combined FilterPlan based on existing lhs and rhs plan - ScanPlan newPlan = new ScanPlan(); - newPlan.markers.putAll(markers); + ScanPlan newPlan = new ScanPlan(defaultPartitionName); + if (hasUnknown() || other.hasUnknown()) { + newPlan.setUnknown(true); + return newPlan; + } + for (Map.Entry entry : markers.entrySet()) { + newPlan.markers.put(entry.getKey(), new ScanMarkerPair( + entry.getValue().startMarker, entry.getValue().endMarker)); + } for (String keyName : other.markers.keySet()) { if (newPlan.markers.containsKey(keyName)) { // create new scan start ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(keyName), - other.getStartMarker(keyName), true); + other.getStartMarker(keyName), true, defaultPartitionName); if (greaterStartMarker != null) { newPlan.setStartMarker(keyName, greaterStartMarker.type, greaterStartMarker.value, greaterStartMarker.isInclusive); } // create new scan end ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(keyName), other.getEndMarker(keyName), - false); + false, defaultPartitionName); if (lesserEndMarker != null) { newPlan.setEndMarker(keyName, lesserEndMarker.type, lesserEndMarker.value, lesserEndMarker.isInclusive); } @@ -360,13 +400,27 @@ private ScanPlan and(ScanPlan other) { */ @VisibleForTesting static ScanMarker 
getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMarker, - boolean getGreater) { + boolean getGreater, String defaultPartitionName) { // if one of them has null bytes, just return other if(lStartMarker == null) { return rStartMarker; } else if (rStartMarker == null) { return lStartMarker; } + if (lStartMarker != null && lStartMarker.value != null && lStartMarker.value.equals(defaultPartitionName)) { + if (getGreater) { + return rStartMarker; + } else { + return lStartMarker; + } + } + if (rStartMarker != null && rStartMarker.value != null && rStartMarker.value.equals(defaultPartitionName)) { + if (getGreater) { + return lStartMarker; + } else { + return rStartMarker; + } + } TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(lStartMarker.type); ObjectInspector outputOI = @@ -394,10 +448,10 @@ static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMa return new ScanMarker(lStartMarker.value, isInclusive, lStartMarker.type); } if (getGreater) { - return compareRes == 1 ? lStartMarker : rStartMarker; + return compareRes > 0 ? lStartMarker : rStartMarker; } // else - return compareRes == -1 ? lStartMarker : rStartMarker; + return compareRes < 0 ? lStartMarker : rStartMarker; } @@ -417,7 +471,8 @@ public FilterPlan or(FilterPlan other) { /** * @return row suffix - This is appended to db + table, to generate start row for the Scan */ - public byte[] getStartRowSuffix(String dbName, String tableName, List parts) { + public byte[] getStartRowSuffix(String dbName, String tableName, List parts, + String defaultPartitionName) { int majorPartsCount = getMajorPartsCount(parts); List majorPartTypes = new ArrayList(); List components = new ArrayList(); @@ -437,14 +492,16 @@ public FilterPlan or(FilterPlan other) { } } } - byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix); + byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, + endPrefix, defaultPartitionName); return bytes; } /** * @return row suffix - This is appended to db + table, to generate end row for the Scan */ - public byte[] getEndRowSuffix(String dbName, String tableName, List parts) { + public byte[] getEndRowSuffix(String dbName, String tableName, List parts, + String defaultPartitionName) { int majorPartsCount = getMajorPartsCount(parts); List majorPartTypes = new ArrayList(); List components = new ArrayList(); @@ -464,7 +521,8 @@ public FilterPlan or(FilterPlan other) { } } } - byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix); + byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, + endPrefix, defaultPartitionName); if (components.isEmpty()) { bytes[bytes.length-1]++; } @@ -507,10 +565,13 @@ public String toString() { private Map nameToType = new HashMap(); - public PartitionFilterGenerator(List parts) { + private String defaultPartitionName; + + public PartitionFilterGenerator(List parts, String defaultPartitionName) { for (FieldSchema part : parts) { nameToType.put(part.getName(), part.getType()); } + this.defaultPartitionName = defaultPartitionName; } FilterPlan getPlan() { @@ -551,8 +612,13 @@ protected void endTreeNode(TreeNode node) throws MetaException { @Override public void visit(LeafNode node) throws MetaException { - ScanPlan leafPlan = new ScanPlan(); + ScanPlan leafPlan = new ScanPlan(defaultPartitionName); curPlan = leafPlan; + if (!nameToType.containsKey(node.keyName)) { + leafPlan.setUnknown(true); 
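// --- Editor's illustrative sketch (not part of the patch) ---------------------
// Why getComparedMarker above now tests the sign of the comparison result
// (compareRes > 0 / compareRes < 0) instead of == 1 / == -1: comparators only
// promise the sign of the value they return, not its magnitude, so the old
// equality checks could silently pick the wrong marker.
public class CompareSignSketch {
  public static void main(String[] args) {
    int r = "2015".compareTo("2017");
    System.out.println(r);        // -2 on the reference JDK, not -1
    System.out.println(r == -1);  // false -> the old check would miss this case
    System.out.println(r < 0);    // true  -> the sign check is reliable
  }
}
// -------------------------------------------------------------------------------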
+ hasUnsupportedCondition = true; + return; + } // this is a condition on first partition column, so might influence the // start and end of the scan @@ -599,12 +665,13 @@ private boolean hasUnsupportedCondition() { } } - public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts) throws MetaException { + public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts, + String defaultPartitionName) throws MetaException { if (exprTree == null) { // TODO: if exprTree is null, we should do what ObjectStore does. See HIVE-10102 - return new PlanResult(new ScanPlan(), true); + return new PlanResult(new ScanPlan(defaultPartitionName), true); } - PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts); + PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts, defaultPartitionName); exprTree.accept(pGenerator); return new PlanResult(pGenerator.getPlan(), pGenerator.hasUnsupportedCondition()); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java index d503cff..8deed34 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterators; + import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; @@ -39,10 +40,12 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.filter.RegexStringComparator; import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -51,6 +54,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; @@ -93,6 +97,7 @@ final static String DB_TABLE = "HBMS_DBS"; final static String FUNC_TABLE = "HBMS_FUNCS"; final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; + final static String COLUMN_PRIVS_TABLE = "HBMS_COLUMN_PRIVS"; final static String PART_TABLE = "HBMS_PARTITIONS"; final static String ROLE_TABLE = "HBMS_ROLES"; final static String SD_TABLE = "HBMS_SDS"; @@ -105,11 +110,14 @@ final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); final static byte[] STATS_CF = "s".getBytes(HBaseUtils.ENCODING); final static String NO_CACHE_CONF = "no.use.cache"; + static enum ColumnPrivilegeType { + TABLE, PARTITION + } /** * List of tables in HBase */ public final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, - GLOBAL_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, + GLOBAL_PRIVS_TABLE, COLUMN_PRIVS_TABLE, PART_TABLE, 
USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, SECURITY_TABLE, SEQUENCES_TABLE, TABLE_TABLE, INDEX_TABLE, FILE_METADATA_TABLE }; public final static Map> columnFamilies = new HashMap<> (tableNames.length); @@ -119,6 +127,7 @@ columnFamilies.put(DB_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(FUNC_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(GLOBAL_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(COLUMN_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(PART_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); columnFamilies.put(USER_TO_ROLE_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(ROLE_TABLE, Arrays.asList(CATALOG_CF)); @@ -608,7 +617,8 @@ Partition getPartition(String dbName, String tableName, List partVals) List parts = new ArrayList<>(partValLists.size()); List gets = new ArrayList<>(partValLists.size()); for (List partVals : partValLists) { - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Get get = new Get(key); get.addColumn(CATALOG_CF, CATALOG_COL); gets.add(get); @@ -637,7 +647,8 @@ Partition getPartition(String dbName, String tableName, List partVals) void putPartition(Partition partition) throws IOException { byte[] hash = putStorageDescriptor(partition.getSd()); byte[][] serialized = HBaseUtils.serializePartition(partition, - HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), hash); + HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), + hash, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); partCache.put(partition.getDbName(), partition.getTableName(), partition); } @@ -659,11 +670,14 @@ void replacePartition(Partition oldPart, Partition newPart, List partTyp hash = putStorageDescriptor(newPart.getSd()); } byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), + hash, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); partCache.put(newPart.getDbName(), newPart.getTableName(), newPart); - if (!oldPart.getTableName().equals(newPart.getTableName())) { - deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues()); + if (!(oldPart.getDbName().equals(newPart.getDbName()) && + oldPart.getTableName().equals(newPart.getTableName()) && + oldPart.getValues().equals(newPart.getValues()))) { + deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues(), false); } } @@ -679,7 +693,8 @@ void putPartitions(List partitions) throws IOException { byte[] hash = putStorageDescriptor(partition.getSd()); List partTypes = HBaseUtils.getPartitionKeyTypes( getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()); - byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash); + byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Put p = new 
Put(serialized[0]); p.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(p); @@ -690,7 +705,8 @@ void putPartitions(List partitions) throws IOException { conn.flush(htab); } - void replacePartitions(List oldParts, List newParts, List oldPartTypes) throws IOException { + void replacePartitions(List oldParts, List newParts, + List oldPartTypes, List newPartTypes) throws IOException { if (oldParts.size() != newParts.size()) { throw new RuntimeException("Number of old and new partitions must match."); } @@ -705,14 +721,18 @@ void replacePartitions(List oldParts, List newParts, List< decrementStorageDescriptorRefCount(oldParts.get(i).getSd()); hash = putStorageDescriptor(newParts.get(i).getSd()); } + Partition oldPart = oldParts.get(i); Partition newPart = newParts.get(i); - byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + byte[][] serialized = HBaseUtils.serializePartition(newPart, newPartTypes, hash, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Put p = new Put(serialized[0]); p.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(p); partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i)); - if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) { + if (!(newPart.getDbName().equals(oldPart.getDbName()) + && newPart.getTableName().equals(oldPart.getTableName()) + && newPart.getValues().equals(oldPart.getValues()) + && newPartTypes.equals(oldPartTypes))) { // We need to remove the old record as well. deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes, oldParts.get(i).getValues(), false); @@ -741,7 +761,7 @@ void replacePartitions(List oldParts, List newParts, List< : new ArrayList<>(cached); } byte[] keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, new ArrayList(), - new ArrayList(), false); + new ArrayList(), false, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); List parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), -1, null); partCache.put(dbName, tableName, parts, true); @@ -826,7 +846,8 @@ String printPartition(String partKey) throws IOException, TException { byte[] key = HBaseUtils.buildPartitionKey(partKeyParts[0], partKeyParts[1], HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), - Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length))); + Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length)), + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(PART_TABLE); Get g = new Get(key); @@ -915,7 +936,8 @@ private void deletePartition(String dbName, String tableName, List partT Partition p = getPartition(dbName, tableName, partVals, false); decrementStorageDescriptorRefCount(p.getSd()); } - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); delete(PART_TABLE, key, null, null); } @@ -924,7 +946,8 @@ private Partition getPartition(String dbName, String tableName, List par Partition cached = partCache.get(dbName, tableName, partVals); if (cached != null) return cached; byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, - 
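// --- Editor's illustrative sketch (not part of the patch) ---------------------
// Why replacePartition/replacePartitions above now compare db name, table name
// and partition values before skipping the delete of the old row: the HBase row
// key for a partition is derived from all three, so changing any of them writes
// the record under a new key and the old key has to be removed explicitly.
// "PartitionId" below is a stand-in, not Hive code.
import java.util.Arrays;
import java.util.List;

public class PartitionKeyIdentitySketch {
  static final class PartitionId {
    final String db;
    final String table;
    final List<String> values;
    PartitionId(String db, String table, List<String> values) {
      this.db = db; this.table = table; this.values = values;
    }
    boolean sameRowKeyAs(PartitionId other) {
      return db.equals(other.db) && table.equals(other.table) && values.equals(other.values);
    }
  }

  public static void main(String[] args) {
    PartitionId oldPart = new PartitionId("default", "t1", Arrays.asList("2015"));
    PartitionId sameKey = new PartitionId("default", "t1", Arrays.asList("2015"));
    PartitionId moved   = new PartitionId("default", "t1", Arrays.asList("2016"));
    System.out.println(oldPart.sameRowKeyAs(sameKey)); // true  -> old row can stay
    System.out.println(oldPart.sameRowKeyAs(moved));   // false -> old row must be deleted
  }
}
// -------------------------------------------------------------------------------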
HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals); + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; HBaseUtils.StorageDescriptorParts sdParts = @@ -981,17 +1004,11 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName int firstStar = -1; for (int i = 0; i < partVals.size(); i++) { - if ("*".equals(partVals.get(i))) { + if ("*".equals(partVals.get(i)) || "".equals(partVals.get(i))) { firstStar = i; break; } else { - // empty string equals to null partition, - // means star - if (partVals.get(i).equals("")) { - break; - } else { - keyElements.add(partVals.get(i)); - } + keyElements.add(partVals.get(i)); } } @@ -1004,7 +1021,8 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName } keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size()-2)), - keyElements.subList(2, keyElements.size())); + keyElements.subList(2, keyElements.size()), + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); // Now, build a filter out of the remaining keys List ranges = new ArrayList(); @@ -1014,7 +1032,7 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName for (int i = Math.max(0, firstStar); i < table.getPartitionKeys().size() && i < partVals.size(); i++) { - if ("*".equals(partVals.get(i))) { + if ("*".equals(partVals.get(i))||"".equals(partVals.get(i))) { PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator( PartitionKeyComparator.Operator.Type.LIKE, table.getPartitionKeys().get(i).getName(), @@ -1354,7 +1372,7 @@ void removeRoleGrants(String roleName) throws IOException { conn.flush(htab); } - // Finally, walk the table table + // Then, walk the table table puts.clear(); for (Database db : dbs) { List tables = scanTables(db.getName(), null); @@ -1378,6 +1396,27 @@ void removeRoleGrants(String roleName) throws IOException { htab.put(puts); conn.flush(htab); } + + // Finally, walk COLUMN_PRIVS_TABLE table + puts.clear(); + Iterator iter = scan(COLUMN_PRIVS_TABLE, CATALOG_CF, CATALOG_COL); + while (iter.hasNext()) { + Result result = iter.next(); + PrincipalPrivilegeSet privs = HBaseUtils.deserializePrincipalPrivilegeSet( + result.getValue(CATALOG_CF, CATALOG_COL)); + if (privs.getRolePrivileges() != null && privs.getRolePrivileges().get(roleName) != null) { + privs.getRolePrivileges().remove(roleName); + Put put = new Put(result.getRow()); + put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serializePrincipalPrivilegeSet(privs)); + puts.add(put); + } + } + + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.put(puts); + conn.flush(htab); + } } /** @@ -1501,6 +1540,113 @@ private void buildRoleCache() throws IOException { } } + void deleteColumnPrivilege(String dbName, String tableName, String partName) + throws IOException { + List deletes = new ArrayList(); + byte[] keyPrefix; + if (partName != null) { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName, partName); + } else { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName); + } + Iterator iter = + scan(COLUMN_PRIVS_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), + CATALOG_CF, CATALOG_COL, new 
KeyOnlyFilter()); + while (iter.hasNext()) { + Result result = iter.next(); + deletes.add(new Delete(result.getRow())); + } + if (deletes.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.delete(deletes); + conn.flush(htab); + } + } + + List> scanColumnPrivilege(ColumnPrivilegeType type) + throws IOException { + List> ret = + new ArrayList>(); + // TODO: Push type filter to server side + Iterator iter = + scan(COLUMN_PRIVS_TABLE, CATALOG_CF, CATALOG_COL); + while (iter.hasNext()) { + Result result = iter.next(); + String[] columnSpec = HBaseUtils.deserializeKey(result.getRow()); + boolean includeInResult = false; + if (type == ColumnPrivilegeType.TABLE) { + if (columnSpec[2] == null || columnSpec[2].isEmpty()) { + includeInResult = true; + } + } else { // ColumnPrivilegeType.PARTITION + if (columnSpec[2] != null && !columnSpec[2].isEmpty()) { + includeInResult = true; + } + } + if (includeInResult) { + byte[] serialized = result.getValue(CATALOG_CF, CATALOG_COL); + if (serialized == null) continue; + PrincipalPrivilegeSet pps = HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + ret.add(new ObjectPair(columnSpec, pps)); + } + } + return ret; + } + + Map scanColumnPrivilege(String dbName, + String tableName, String partName) + throws IOException { + Map ret = new HashMap(); + byte[] keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName, + partName!=null?partName:""); + Iterator iter = + scan(COLUMN_PRIVS_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), + CATALOG_CF, CATALOG_COL, null); + while (iter.hasNext()) { + Result result = iter.next(); + String[] columnSpec = HBaseUtils.deserializeKey(result.getRow()); + byte[] serialized = result.getValue(CATALOG_CF, CATALOG_COL); + if (serialized == null) continue; + PrincipalPrivilegeSet pps = HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + ret.put(columnSpec[3], pps); + } + return ret; + } + + void putColumnPrivilege(String dbName, String tableName, String partName, + Map privsMap) throws IOException { + List puts = new ArrayList<>(); + for (Map.Entry entry : privsMap.entrySet()) { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partName!=null?partName:"" + , entry.getKey()); + Put put = new Put(key); + put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serializePrincipalPrivilegeSet(entry.getValue())); + puts.add(put); + } + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.put(puts); + conn.flush(htab); + } + } + + PrincipalPrivilegeSet getColumnPrivilege(String dbName, String tableName, + String partitionName, String columnName) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partitionName!=null?partitionName:"" + , columnName); + byte[] serialized = read(COLUMN_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + } + + void putColumnPrivilege(String dbName, String tableName, + String partitionName, String columnName, PrincipalPrivilegeSet privs) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partitionName!=null?partitionName:"" + , columnName); + store(COLUMN_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL, + HBaseUtils.serializePrincipalPrivilegeSet(privs)); + } + /********************************************************************************************** * Table related methods *********************************************************************************************/ @@ 
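// --- Editor's illustrative sketch (not part of the patch) ---------------------
// Conceptual layout of a row key in the new HBMS_COLUMN_PRIVS table, based on the
// buildKey(dbName, tableName, partName-or-"", columnName) calls above: table-level
// column privileges leave the partition slot empty, which is also how
// scanColumnPrivilege(ColumnPrivilegeType) tells the two kinds apart. The '|'
// separator and UTF-8 encoding below are placeholders; the real byte layout is
// owned by HBaseUtils.buildKey / buildKeyWithTrailingSeparator.
import java.nio.charset.StandardCharsets;

public class ColumnPrivKeySketch {
  static byte[] key(String db, String table, String partName, String column) {
    String part = partName == null ? "" : partName;   // table-level rows use ""
    return String.join("|", db, table, part, column).getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    System.out.println(new String(key("default", "t1", null, "c1"), StandardCharsets.UTF_8));
    // default|t1||c1                 <- table-level column privilege
    System.out.println(new String(key("default", "t1", "ds=2015-01-01", "c1"), StandardCharsets.UTF_8));
    // default|t1|ds=2015-01-01|c1    <- partition-level column privilege
  }
}
// -------------------------------------------------------------------------------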
-1583,7 +1729,8 @@ Table getTable(String dbName, String tableName) throws IOException { } Filter filter = null; if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator( + dbName + HBaseUtils.KEY_SEPARATOR + regex + "$")); } Iterator iter = scan(TABLE_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), @@ -1633,8 +1780,9 @@ void replaceTable(Table oldTable, Table newTable) throws IOException { byte[][] serialized = HBaseUtils.serializeTable(newTable, hash); store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); tableCache.put(new ObjectPair<>(newTable.getDbName(), newTable.getTableName()), newTable); - if (!oldTable.getTableName().equals(newTable.getTableName())) { - deleteTable(oldTable.getDbName(), oldTable.getTableName()); + if (!(oldTable.getDbName().equals(newTable.getDbName()) && + oldTable.getTableName().equals(newTable.getTableName()))) { + deleteTable(oldTable.getDbName(), oldTable.getTableName(), false); } } @@ -1872,6 +2020,9 @@ void replaceIndex(Index oldIndex, Index newIndex) throws IOException { * @throws IOException */ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { + if (hash == null || hash.length == 0) { + return null; + } ByteArrayWrapper hashKey = new ByteArrayWrapper(hash); StorageDescriptor cached = sdCache.get(hashKey); if (cached != null) return cached; @@ -1893,6 +2044,9 @@ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { * @throws IOException */ void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException { + if (sd == null) { + return; + } byte[] key = HBaseUtils.hashStorageDescriptor(sd, md); byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); if (serializedRefCnt == null) { @@ -1926,6 +2080,9 @@ void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException * @return id of the entry in the cache, to be written in for the storage descriptor */ byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException { + if (storageDescriptor == null) { + return null; + } byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor); byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md); byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); @@ -2012,17 +2169,35 @@ public int hashCode() { * @param tableName table to update statistics for * @param partVals partition values that define partition to update statistics for. 
If this is * null, then these will be assumed to be table level statistics + * @param colNames list of columns inside stats * @param stats Stats object with stats for one or more columns * @throws IOException + * @throws MetaException */ - void updateStatistics(String dbName, String tableName, List partVals, - ColumnStatistics stats) throws IOException { + void updateStatistics(String dbName, String tableName, List partVals, List colNames, + ColumnStatistics stats) throws IOException, MetaException { byte[] key = getStatisticsKey(dbName, tableName, partVals); String hbaseTable = getStatisticsTable(partVals); + ColumnStatistics orig; + if (partVals == null) { + orig = getTableStatistics(dbName, tableName, colNames); + } else { + orig = getOnePartitionStatistics(dbName, tableName, partVals, colNames); + } + Map origColumnStatsMap = new HashMap(); + if (orig != null && orig.getStatsObj() != null) { + for (ColumnStatisticsObj obj : orig.getStatsObj()) { + origColumnStatsMap.put(obj.getColName(), obj); + } + } byte[][] colnames = new byte[stats.getStatsObjSize()][]; byte[][] serialized = new byte[stats.getStatsObjSize()][]; for (int i = 0; i < stats.getStatsObjSize(); i++) { ColumnStatisticsObj obj = stats.getStatsObj().get(i); + if (origColumnStatsMap.containsKey(obj.getColName())) { + obj = HBaseUtils.mergeColumnStatisticsForOneColumn( + origColumnStatsMap.get(obj.getColName()), obj); + } serialized[i] = HBaseUtils.serializeStatsForOneColumn(stats, obj); String colname = obj.getColName(); colnames[i] = HBaseUtils.buildKey(colname); @@ -2030,6 +2205,16 @@ void updateStatistics(String dbName, String tableName, List partVals, store(hbaseTable, key, STATS_CF, colnames, serialized); } + void deleteStatistics(String dbName, String tableName, List partVals, + String colName) throws IOException { + byte[] key = getStatisticsKey(dbName, tableName, partVals); + String hbaseTable = getStatisticsTable(partVals); + HTableInterface htab = conn.getHBaseTable(hbaseTable); + Delete d = new Delete(key); + d.addColumn(STATS_CF, HBaseUtils.buildKey(colName)); + htab.delete(d); + } + /** * Get statistics for a table * @@ -2067,6 +2252,40 @@ ColumnStatistics getTableStatistics(String dbName, String tblName, List return tableStats; } + ColumnStatistics getOnePartitionStatistics(String dbName, String tblName, + List partVals, List colNames) + throws IOException, MetaException { + String partName = Warehouse.makePartName(getTable(dbName, tblName).getPartitionKeys(), + partVals); + byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), + partVals, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); + ColumnStatistics partStats = new ColumnStatistics(); + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setIsTblLevel(false); + statsDesc.setDbName(dbName); + statsDesc.setTableName(tblName); + statsDesc.setPartName(partName); + partStats.setStatsDesc(statsDesc); + byte[][] colKeys = new byte[colNames.size()][]; + for (int i = 0; i < colKeys.length; i++) { + colKeys[i] = HBaseUtils.buildKey(colNames.get(i)); + } + Result result = read(PART_TABLE, partKey, STATS_CF, colKeys); + for (int i = 0; i < colKeys.length; i++) { + byte[] serializedColStats = result.getValue(STATS_CF, colKeys[i]); + if (serializedColStats == null) { + // There were no stats for this column, so skip it + continue; + } + ColumnStatisticsObj obj = + HBaseUtils.deserializeStatsForOneColumn(partStats, serializedColStats); + 
obj.setColName(colNames.get(i)); + partStats.addToStatsObj(obj); + } + return partStats; + } + /** * Get statistics for a set of partitions * @@ -2099,7 +2318,7 @@ ColumnStatistics getTableStatistics(String dbName, String tblName, List valToPartMap.put(partVals.get(i), partNames.get(i)); byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), - partVals.get(i)); + partVals.get(i), HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Get get = new Get(partKey); for (byte[] colName : colNameBytes) { get.addColumn(STATS_CF, colName); @@ -2219,7 +2438,7 @@ void putAggregatedStats(byte[] key, String dbName, String tableName, List partVals) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 9edf9bf..761b3a9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -26,6 +26,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.FileMetadataHandler; @@ -72,6 +73,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite.ColumnPrivilegeType; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; @@ -105,6 +107,7 @@ private Map fmHandlers; public HBaseStore() { + LOG.info("Using HBase Metastore"); } @Override @@ -313,6 +316,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, try { getHBase().deleteTable(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tableName)); + getHBase().deleteColumnPrivilege(dbName, tableName, null); commit = true; return true; } catch (IOException e) { @@ -332,9 +336,13 @@ public Table getTable(String dbName, String tableName) throws MetaException { HiveStringUtils.normalizeIdentifier(tableName)); if (table == null) { LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName)); + return table; } - commit = true; - return table; + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + // For backward compatibility, getPartition does not return privileges + sTable.unsetPrivileges(); + return sTable; } catch (IOException e) { LOG.error("Unable to get table", e); throw new MetaException("Error reading table " + e.getMessage()); @@ -348,10 +356,21 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE boolean commit = false; openTransaction(); try { + Table table = getHBase().getTable(part.getDbName(), part.getTableName()); Partition partCopy = part.deepCopy(); partCopy.setDbName(HiveStringUtils.normalizeIdentifier(part.getDbName())); partCopy.setTableName(HiveStringUtils.normalizeIdentifier(part.getTableName())); + if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) 
{ + partCopy.setPrivileges(table.getPrivileges().deepCopy()); + } getHBase().putPartition(partCopy); + if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + // Copy table column priv to partition + Map privs = getHBase().scanColumnPrivilege(part.getDbName(), + part.getTableName(), null); + String partName = Warehouse.makePartName(table.getPartitionKeys(), partCopy.getValues()); + getHBase().putColumnPrivilege(part.getDbName(), part.getTableName(), partName, privs); + } commit = true; return true; } catch (IOException e) { @@ -368,12 +387,25 @@ public boolean addPartitions(String dbName, String tblName, List part boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tblName = HiveStringUtils.normalizeIdentifier(tblName); + Table table = getHBase().getTable(dbName, tblName); List partsCopy = new ArrayList(); for (int i=0;i privs = getHBase().scanColumnPrivilege( + partCopy.getDbName(), partCopy.getTableName(), null); + getHBase().putColumnPrivilege(partCopy.getDbName(), partCopy.getTableName(), partName, privs); + } } getHBase().putPartitions(partsCopy); commit = true; @@ -404,8 +436,12 @@ public Partition getPartition(String dbName, String tableName, List part throw new NoSuchObjectException("Unable to find partition " + partNameForErrorMsg(dbName, tableName, part_vals)); } + SharedPartition sPartition = new SharedPartition(); + sPartition.setShared(part); + // For backward compatibility, getPartition does not return privileges + sPartition.unsetPrivileges(); commit = true; - return part; + return sPartition; } catch (IOException e) { LOG.error("Unable to get partition", e); throw new MetaException("Error reading partition " + e.getMessage()); @@ -441,14 +477,17 @@ public boolean dropPartition(String dbName, String tableName, List part_ dbName = HiveStringUtils.normalizeIdentifier(dbName); tableName = HiveStringUtils.normalizeIdentifier(tableName); getHBase().deletePartition(dbName, tableName, HBaseUtils.getPartitionKeyTypes( - getTable(dbName, tableName).getPartitionKeys()), part_vals); + getHBase().getTable(dbName, tableName).getPartitionKeys()), part_vals); // Drop any cached stats that reference this partitions getHBase().getStatsCache().invalidate(dbName, tableName, buildExternalPartName(dbName, tableName, part_vals)); + Table table = getHBase().getTable(dbName, tableName); + String partName = Warehouse.makePartName(table.getPartitionKeys(), part_vals); + getHBase().deleteColumnPrivilege(dbName, tableName, partName); commit = true; return true; } catch (IOException e) { - LOG.error("Unable to delete db" + e); + LOG.error("Unable to drop partition" + e); throw new MetaException("Unable to drop partition " + partNameForErrorMsg(dbName, tableName, part_vals)); } finally { @@ -465,7 +504,7 @@ public boolean dropPartition(String dbName, String tableName, List part_ List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tableName), max); commit = true; - return parts; + return HBaseUtils.getSharedPartitionsNoPriv(parts); } catch (IOException e) { LOG.error("Unable to get partitions", e); throw new MetaException("Error scanning partitions"); @@ -480,31 +519,37 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + Table oldTable = 
getHBase().getTable(dbName, tableName); Table newTableCopy = newTable.deepCopy(); newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName())); - List oldPartTypes = getTable(dbName, tableName).getPartitionKeys()==null? - null:HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()); + List oldPartTypes = getHBase().getTable(dbName, tableName).getPartitionKeys()==null? + null:HBaseUtils.getPartitionKeyTypes(getHBase().getTable(dbName, tableName).getPartitionKeys()); newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName())); - getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy); - if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0 - && !tableName.equals(newTable.getTableName())) { - // They renamed the table, so we need to change each partition as well, since it changes - // the key. + // alter table don't suppose to change privileges + newTableCopy.setPrivileges(oldTable.getPrivileges()); + if (oldTable.getPartitionKeys() != null && newTable.getPartitionKeys() != null && + (!dbName.equals(newTable.getDbName()) // Change db + || !tableName.equals(newTable.getTableName()) // Rename table + || !newTable.getPartitionKeys().equals(oldTable.getPartitionKeys()))) // Change partition keys + { try { List oldParts = getPartitions(dbName, tableName, -1); List newParts = new ArrayList<>(oldParts.size()); for (Partition oldPart : oldParts) { Partition newPart = oldPart.deepCopy(); + newPart.setDbName(newTable.getDbName()); newPart.setTableName(newTable.getTableName()); newParts.add(newPart); } - getHBase().replacePartitions(oldParts, newParts, oldPartTypes); + getHBase().replacePartitions(oldParts, newParts, oldPartTypes, + HBaseUtils.getPartitionKeyTypes(newTable.getPartitionKeys())); } catch (NoSuchObjectException e) { LOG.debug("No partitions found for old table so not worrying about it"); } - } + getHBase().replaceTable(oldTable, newTableCopy); commit = true; } catch (IOException e) { LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e); @@ -516,6 +561,9 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I @Override public List getTables(String dbName, String pattern) throws MetaException { + if (pattern == null || pattern.isEmpty()) { + return new ArrayList(); + } boolean commit = false; openTransaction(); try { @@ -578,7 +626,7 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I List
tables = getHBase().getTables(HiveStringUtils.normalizeIdentifier(dbname), normalizedTableNames); commit = true; - return tables; + return HBaseUtils.getSharedTablesNoPriv(tables); } catch (IOException e) { LOG.error("Unable to get tables ", e); throw new MetaException("Unable to get tables, " + e.getMessage()); @@ -589,7 +637,7 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I @Override public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, null); + return getTables(dbName, "*"); } @Override @@ -647,13 +695,26 @@ public void alterPartition(String db_name, String tbl_name, List part_va boolean commit = false; openTransaction(); try { + db_name = HiveStringUtils.normalizeIdentifier(db_name); + tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); Partition new_partCopy = new_part.deepCopy(); new_partCopy.setDbName(HiveStringUtils.normalizeIdentifier(new_partCopy.getDbName())); new_partCopy.setTableName(HiveStringUtils.normalizeIdentifier(new_partCopy.getTableName())); - Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part_vals); + Partition oldPart = getHBase().getPartition(db_name, tbl_name, part_vals); + // alter partition don't suppose to change privileges + new_partCopy.setPrivileges(oldPart.getPrivileges()); getHBase().replacePartition(oldPart, new_partCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); + getHBase().getTable(db_name, tbl_name).getPartitionKeys())); + // Update column stats if needed + if (!(db_name.equals(new_partCopy.getDbName()) && + tbl_name.equals(new_partCopy.getTableName()) && + part_vals.equals(new_partCopy.getValues()))) { + Map privs = getHBase().scanColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), part_vals)); + getHBase().putColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), + new_partCopy.getValues()), privs); + } // Drop any cached stats that reference this partitions getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name), @@ -674,19 +735,32 @@ public void alterPartitions(String db_name, String tbl_name, List> boolean commit = false; openTransaction(); try { + db_name = HiveStringUtils.normalizeIdentifier(db_name); + tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); List new_partsCopy = new ArrayList(); + List oldParts = getHBase().getPartitions(db_name, tbl_name, + HBaseUtils.getPartitionKeyTypes(getHBase().getTable(db_name, tbl_name).getPartitionKeys()), + part_vals_list); for (int i=0;i oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), - HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list); - getHBase().replacePartitions(oldParts, new_partsCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); + List partKeyTypes = HBaseUtils.getPartitionKeyTypes( + getHBase().getTable(db_name, tbl_name).getPartitionKeys()); + getHBase().replacePartitions(oldParts, new_partsCopy, partKeyTypes, partKeyTypes); + // Update column stats if needed + for (int i=0;i privs = getHBase().scanColumnPrivilege(db_name, tbl_name, + 
Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), oldPart.getValues())); + getHBase().putColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), newPart.getValues()), privs); + } for (List part_vals : part_vals_list) { getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name), @@ -838,7 +912,8 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new openTransaction(); try { getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result); + HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); commit = true; return result; } finally { @@ -853,10 +928,11 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); dbName = HiveStringUtils.normalizeIdentifier(dbName); tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); boolean commit = false; - openTransaction(); try { + Table table = getHBase().getTable(dbName, tblName); + openTransaction(); + boolean hasUnknownPartitions; if (exprTree == null) { List partNames = new LinkedList(); @@ -864,10 +940,13 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, table, expr, defaultPartitionName, maxParts, partNames); result.addAll(getPartitionsByNames(dbName, tblName, partNames)); } else { - hasUnknownPartitions = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result); + hasUnknownPartitions = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, + result, defaultPartitionName); } commit = true; return hasUnknownPartitions; + } catch (IOException e) { + throw new TException(e); } finally { commitOrRoleBack(commit); } @@ -919,30 +998,37 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, } private boolean getPartitionsByExprInternal(String dbName, String tblName, - ExpressionTree exprTree, short maxParts, List result) throws MetaException, + ExpressionTree exprTree, short maxParts, List result, + String defaultPartitionName) throws MetaException, NoSuchObjectException { dbName = HiveStringUtils.normalizeIdentifier(dbName); tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); - if (table == null) { - throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); - } - // general hbase filter plan from expression tree - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys()); - if (LOG.isDebugEnabled()) { - LOG.debug("Hbase Filter Plan generated : " + planRes.plan); - } + try { + Table table = getHBase().getTable(dbName, tblName); + if (table == null) { + throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); + } + // general hbase filter plan from expression tree + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys(), + defaultPartitionName); + if (LOG.isDebugEnabled()) { + LOG.debug("Hbase Filter Plan generated : " + planRes.plan); + } - // results from scans need to be merged as there can be overlapping results between - // the scans. 
Use a map of list of partition values to partition for this. - Map, Partition> mergedParts = new HashMap, Partition>(); - for (ScanPlan splan : planRes.plan.getPlans()) { - try { - List parts = getHBase().scanPartitions(dbName, tblName, - splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getFilter(table.getPartitionKeys()), -1); + // results from scans need to be merged as there can be overlapping results between + // the scans. Use a map of list of partition values to partition for this. + Map, Partition> mergedParts = new HashMap, Partition>(); + for (ScanPlan splan : planRes.plan.getPlans()) { + List parts; + if (splan.hasUnknown()) { + parts = getHBase().scanPartitionsInTable(dbName, tblName, -1); + } else { + parts = getHBase().scanPartitions(dbName, tblName, + splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys(), defaultPartitionName), + splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys(), defaultPartitionName), + splan.getFilter(table.getPartitionKeys()), -1); + } boolean reachedMax = false; for (Partition part : parts) { mergedParts.put(part.getValues(), part); @@ -954,33 +1040,38 @@ private boolean getPartitionsByExprInternal(String dbName, String tblName, if (reachedMax) { break; } - } catch (IOException e) { - LOG.error("Unable to get partitions", e); - throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) - + ": " + e); } - } - for (Entry, Partition> mp : mergedParts.entrySet()) { - result.add(mp.getValue()); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Matched partitions " + result); - } + for (Entry, Partition> mp : mergedParts.entrySet()) { + result.add(mp.getValue()); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Matched partitions " + result); + } - // return true if there might be some additional partitions that don't match filter conditions - // being returned - return !planRes.hasUnsupportedCondition; + // return true if there might be some additional partitions that don't match filter conditions + // being returned + return planRes.hasUnsupportedCondition; + } catch (IOException e) { + LOG.error("Unable to get partitions", e); + throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) + + ": " + e); + } } @Override public List getPartitionsByNames(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - List parts = new ArrayList(); - for (String partName : partNames) { - parts.add(getPartition(dbName, tblName, partNameToVals(partName))); + try { + List parts = new ArrayList(); + for (String partName : partNames) { + parts.add(getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName))); + } + return parts; + } catch (IOException e) { + throw new MetaException("Failed to list part names, " + e.getMessage()); } - return parts; } @Override @@ -1166,6 +1257,15 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List g } } + if (global.getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = global.getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (global.getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1206,6 +1306,15 @@ public PrincipalPrivilegeSet 
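// --- Editor's illustrative sketch (not part of the patch) ---------------------
// The decision getPartitionsByExprInternal above now makes: a predicate on a
// column that is not a partition key cannot be translated into an HBase start/end
// row or a pushed-down filter, so the ScanPlan is marked "unknown", the lookup
// falls back to a full scanPartitionsInTable(), and the method returns
// hasUnsupportedCondition so the caller (which assigns it to hasUnknownPartitions)
// knows client-side filtering may still be needed. The helper below only mirrors
// that decision; it is not Hive code.
import java.util.Arrays;
import java.util.List;

public class UnknownKeyFallbackSketch {
  static boolean canPruneByRange(List<String> partitionKeys, String predicateColumn) {
    return partitionKeys.contains(predicateColumn);
  }

  public static void main(String[] args) {
    List<String> partKeys = Arrays.asList("year", "month", "state");
    System.out.println(canPruneByRange(partKeys, "year"));    // true  -> range scan
    System.out.println(canPruneByRange(partKeys, "user_id")); // false -> full scan + unknown flag
  }
}
// -------------------------------------------------------------------------------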
getDBPrivilegeSet(String dbName, String userName, } } + if (db.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = db.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (db.getPrivileges().getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1235,6 +1344,8 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableNam boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); Table table = getHBase().getTable(dbName, tableName); List pgi; @@ -1246,6 +1357,15 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableNam } } + if (table.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = table.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (table.getPrivileges().getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1273,8 +1393,50 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tabl String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - // We don't support partition privileges - return null; + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + List partVals = Warehouse.getPartValuesFromPartName(partition); + Partition part = getHBase().getPartition(dbName, tableName, partVals); + List pgi; + if (part.getPrivileges() != null) { + if (part.getPrivileges().getUserPrivileges() != null) { + pgi = part.getPrivileges().getUserPrivileges().get(userName); + if (pgi != null) { + pps.putToUserPrivileges(userName, pgi); + } + } + + if (part.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = part.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + + if (part.getPrivileges().getRolePrivileges() != null) { + List roles = getHBase().getUserRoles(userName); + if (roles != null) { + for (String role : roles) { + pgi = part.getPrivileges().getRolePrivileges().get(role); + if (pgi != null) { + pps.putToRolePrivileges(role, pgi); + } + } + } + } + } + commit = true; + return pps; + } catch (IOException e) { + LOG.error("Unable to get partition privileges for user", e); + throw new MetaException("Unable to get partition privileges for user, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1283,8 +1445,25 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa String userName, List groupNames) throws InvalidObjectException, MetaException { - // We don't support column level privileges - return null; + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet ret = 
getHBase().getColumnPrivilege(dbName, tableName, + partitionName, columnName); + commited = commitTransaction(); + return ret; + } catch (IOException e) { + LOG.error("Unable to get column privileges for user", e); + throw new MetaException("Unable to get column privileges for user, " + e.getMessage()); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -1303,6 +1482,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1347,6 +1530,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1392,6 +1579,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1424,8 +1615,89 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa String tableName, List partValues, String partName) { - // We don't support partition grants - return new ArrayList(); + List grants; + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + Partition partition = getHBase().getPartition(dbName, tableName, partValues); + if (partition == null) return privileges; + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partValues, null), principalName, principalType, pgi)); + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + private List listPrincipalColumnGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, List partVals, + String columnName) throws IOException, MetaException { + List grants; + List privileges = new ArrayList(); + String partitionName = null; + if (partVals != null) { + Table table = getHBase().getTable(dbName, tableName); + partitionName = Warehouse.makePartName(table.getPartitionKeys(), partVals); + } + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, partitionName, columnName); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return 
privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.COLUMN, dbName, + tableName, partVals, columnName), principalName, principalType, pgi)); + } + return privileges; } @Override @@ -1433,8 +1705,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa PrincipalType principalType, String dbName, String tableName, String columnName) { - // We don't support column grants - return new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List grants = listPrincipalColumnGrants(principalName, principalType, dbName, tableName, + null, columnName); + commit = true; + return grants; + } catch (MetaException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1445,8 +1727,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa List partVals, String partName, String columnName) { - // We don't support column grants - return new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List grants = listPrincipalColumnGrants(principalName, principalType, dbName, tableName, + partVals, columnName); + commit = true; + return grants; + } catch (MetaException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1472,6 +1764,9 @@ public boolean grantPrivileges(PrivilegeBag privileges) } commit = true; return true; + } catch (IOException e) { + LOG.error("Unable to grant privilege", e); + throw new MetaException("Error granting privilege, " + e.getMessage()); } finally { commitOrRoleBack(commit); } @@ -1498,6 +1793,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) th } commit = true; return true; + } catch (IOException e) { + LOG.error("Unable to revoke privilege", e); + throw new MetaException("Error revoking privilege, " + e.getMessage()); } finally { commitOrRoleBack(commit); } @@ -1506,13 +1804,21 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) th private static class PrivilegeInfo { Database db; Table table; + Partition partition; + static class ColumnInfo { + String dbName; + String tableName; + String partitionName; + String columnName; + } + ColumnInfo columnInfo; List grants; String typeErrMsg; PrincipalPrivilegeSet privSet; } private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege) - throws MetaException, NoSuchObjectException, InvalidObjectException { + throws MetaException, NoSuchObjectException, InvalidObjectException, IOException { PrivilegeInfo result = new PrivilegeInfo(); switch (privilege.getHiveObject().getObjectType()) { case GLOBAL: @@ -1526,25 +1832,49 @@ private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege break; case DATABASE: - result.db = getDatabase(privilege.getHiveObject().getDbName()); + result.db = getHBase().getDb(privilege.getHiveObject().getDbName()); result.typeErrMsg = "database " + result.db.getName(); result.privSet = createOnNull(result.db.getPrivileges()); break; case TABLE: - result.table = getTable(privilege.getHiveObject().getDbName(), + result.table = getHBase().getTable(privilege.getHiveObject().getDbName(), privilege.getHiveObject().getObjectName()); - result.typeErrMsg = "table " + result.table.getTableName(); + result.typeErrMsg = "table " + result.table.getDbName() + "." 
+ + result.table.getTableName(); result.privSet = createOnNull(result.table.getPrivileges()); break; case PARTITION: + result.partition = getHBase().getPartition(privilege.getHiveObject().getDbName(), + privilege.getHiveObject().getObjectName(), privilege.getHiveObject().getPartValues()); + result.typeErrMsg = "partition " + result.partition.getDbName() + "." + + result.partition.getTableName() + "." + result.partition.getValues(); + result.privSet = createOnNull(result.partition.getPrivileges()); + break; + case COLUMN: - throw new RuntimeException("HBase metastore does not support partition or column " + - "permissions"); + result.columnInfo = new PrivilegeInfo.ColumnInfo(); + result.columnInfo.dbName = privilege.getHiveObject().getDbName(); + result.columnInfo.tableName = privilege.getHiveObject().getObjectName(); + result.columnInfo.columnName = privilege.getHiveObject().getColumnName(); + if (privilege.getHiveObject().getPartValues() != null) { + Table table = getHBase().getTable(privilege.getHiveObject().getDbName(), + privilege.getHiveObject().getObjectName()); + result.columnInfo.partitionName = Warehouse.makePartName(table.getPartitionKeys(), + privilege.getHiveObject().getPartValues()); + } + result.typeErrMsg = "column " + result.columnInfo.dbName + "." + + result.columnInfo.tableName + "." + + (result.columnInfo.partitionName != null ? result.columnInfo.partitionName + "." : "") + + result.columnInfo.columnName; + result.privSet = createOnNull(getHBase().getColumnPrivilege(result.columnInfo.dbName, + result.columnInfo.tableName, result.columnInfo.partitionName, + result.columnInfo.columnName)); + break; default: - throw new RuntimeException("Woah bad, unknown object type " + + throw new RuntimeException("Unknown object type " + privilege.getHiveObject().getObjectType()); } @@ -1553,19 +1883,21 @@ private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege switch (privilege.getPrincipalType()) { case USER: grantInfos = result.privSet.getUserPrivileges(); - result.typeErrMsg = "user"; + result.typeErrMsg += ",user " + privilege.getPrincipalType(); break; case GROUP: - throw new RuntimeException("HBase metastore does not support group permissions"); + grantInfos = result.privSet.getGroupPrivileges(); + result.typeErrMsg += ",group " + privilege.getPrincipalType(); + break; case ROLE: grantInfos = result.privSet.getRolePrivileges(); - result.typeErrMsg = "role"; + result.typeErrMsg += ",role " + privilege.getPrincipalType(); break; default: - throw new RuntimeException("Woah bad, unknown principal type " + + throw new RuntimeException("Unknown principal type " + privilege.getPrincipalType()); } @@ -1587,6 +1919,9 @@ private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { if (pps.getUserPrivileges() == null) { pps.setUserPrivileges(new HashMap>()); } + if (pps.getGroupPrivileges() == null) { + pps.setGroupPrivileges(new HashMap>()); + } if (pps.getRolePrivileges() == null) { pps.setRolePrivileges(new HashMap>()); } @@ -1594,7 +1929,7 @@ private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { } private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) - throws MetaException, NoSuchObjectException, InvalidObjectException { + throws MetaException, NoSuchObjectException, InvalidObjectException, IOException { // Now write it back switch (priv.getHiveObject().getObjectType()) { case GLOBAL: @@ -1616,8 +1951,19 @@ private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) alterTable(pi.table.getDbName(), 
pi.table.getTableName(), pi.table); break; + case PARTITION: + pi.partition.setPrivileges(pi.privSet); + alterPartition(pi.partition.getDbName(), pi.partition.getTableName(), + pi.partition.getValues(), pi.partition); + break; + + case COLUMN: + getHBase().putColumnPrivilege(pi.columnInfo.dbName, pi.columnInfo.tableName, + pi.columnInfo.partitionName, pi.columnInfo.columnName, pi.privSet); + break; + default: - throw new RuntimeException("Dude, you missed the second switch!"); + throw new RuntimeException("Unknown privilege type " + priv.getHiveObject().getObjectType()); } } @@ -1688,6 +2034,10 @@ public Role getRole(String roleName) throws NoSuchObjectException { List roles = listRoles(principalName, principalType); List rpgs = new ArrayList(roles.size()); for (Role role : roles) { + if (role.getRoleName().equals(HiveMetaStore.PUBLIC)) { + rpgs.add(new RolePrincipalGrant(HiveMetaStore.PUBLIC, principalName, PrincipalType.USER, + false, 0, null, null)); + } HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); if (grants != null) { for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) { @@ -1735,8 +2085,11 @@ public Role getRole(String roleName) throws NoSuchObjectException { public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { - // We don't do authorization checks for partitions. - return getPartition(dbName, tblName, partVals); + try { + return getHBase().getPartition(dbName, tblName, partVals); + } catch (IOException e) { + throw new MetaException("Cannot get partition, " + e.getMessage()); + } } @Override @@ -1751,14 +2104,18 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List parts = - listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); - List partNames = new ArrayList(parts.size()); - for (Partition part : parts) { - partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); + try { + List parts = + listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); + List partNames = new ArrayList(parts.size()); + for (Partition part : parts) { + partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); + } + return partNames; + } catch (IOException e) { + throw new MetaException("Failed to list part names, " + e.getMessage()); } - return partNames; } @@ -1797,13 +2154,13 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws } String dbName = colStats.getStatsDesc().getDbName(); String tableName = colStats.getStatsDesc().getTableName(); - Table newTable = getTable(dbName, tableName); + Table newTable = getHBase().getTable(dbName, tableName); Table newTableCopy = newTable.deepCopy(); StatsSetupConst.setColumnStatsState(newTableCopy.getParameters(), colNames); getHBase().replaceTable(newTable, newTableCopy); getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), null, colStats); + colStats.getStatsDesc().getTableName(), null, colNames, colStats); commit = true; return true; @@ -1834,10 +2191,10 @@ public boolean 
updatePartitionColumnStatistics(ColumnStatistics colStats, } StatsSetupConst.setColumnStatsState(new_partCopy.getParameters(), colNames); getHBase().replacePartition(oldPart, new_partCopy, - HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys())); + HBaseUtils.getPartitionKeyTypes(getHBase().getTable(db_name, tbl_name).getPartitionKeys())); getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), partVals, colStats); + colStats.getStatsDesc().getTableName(), partVals, colNames, colStats); // We need to invalidate aggregates that include this partition getHBase().getStatsCache().invalidate(colStats.getStatsDesc().getDbName(), colStats.getStatsDesc().getTableName(), colStats.getStatsDesc().getPartName()); @@ -1896,15 +2253,35 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the partition when it is dropped. - return true; + boolean commit = false; + openTransaction(); + try { + getHBase().deleteStatistics(dbName, tableName, partVals, colName); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete column statistics", e); + throw new MetaException("Failed to delete column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } @Override public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the table when it is dropped. 
- return true; + boolean commit = false; + openTransaction(); + try { + getHBase().deleteStatistics(dbName, tableName, null, colName); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete column statistics", e); + throw new MetaException("Failed to delete column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } /** @@ -2128,32 +2505,10 @@ public void dropPartitions(String dbName, String tblName, List partNames try { List dbs = getHBase().scanDatabases(null); for (Database db : dbs) { - List grants; - PrincipalPrivilegeSet pps = db.getPrivileges(); if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, - db.getName(), null, null, null), principalName, principalType, pgi)); - } + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.DATABASE, db.getName(), null, null, null)); } commit = true; return privileges; @@ -2164,6 +2519,70 @@ public void dropPartitions(String dbName, String tblName, List partNames } } + private List convertToHiveObjectPrivileges(PrincipalPrivilegeSet pps, + String principalName, PrincipalType principalType, HiveObjectType objectType, String dbName, + String tableName, List partVals, String columnName) { + List privileges = new ArrayList(); + List grants; + Map> map; + if (principalName != null && principalType != null) { + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) { + return privileges; + } + grants = map.get(principalName); + if (grants == null || grants.size() == 0) { + return privileges; + } + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(objectType, + dbName, tableName, partVals, columnName), principalName, principalType, pgi)); + } + } else { + map = new HashMap>(); + if (pps.getUserPrivileges() != null) { + addPrivilegesMap(pps.getUserPrivileges(), PrincipalType.USER, objectType, + dbName, tableName, partVals, columnName, privileges); + } + if (pps.getGroupPrivileges() != null) { + addPrivilegesMap(pps.getGroupPrivileges(), PrincipalType.GROUP, objectType, + dbName, tableName, partVals, columnName, privileges); + } + if (pps.getRolePrivileges() != null) { + addPrivilegesMap(pps.getRolePrivileges(), PrincipalType.ROLE, objectType, + dbName, tableName, partVals, columnName, privileges); + } + } + return privileges; + } + + private void addPrivilegesMap(Map> m, PrincipalType principalType, + HiveObjectType objectType, String dbName, String tableName, List partVals, + String columnName, List privileges) { + for (Map.Entry> entry : m.entrySet()) { + for (PrivilegeGrantInfo pgi : entry.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(objectType, + dbName, tableName, partVals, 
columnName), entry.getKey(), principalType, pgi)); + } + } + } + @Override public List listPrincipalTableGrantsAll(String principalName, PrincipalType principalType) { @@ -2177,29 +2596,8 @@ public void dropPartitions(String dbName, String tblName, List partNames PrincipalPrivilegeSet pps = table.getPrivileges(); if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, - table.getDbName(), table.getTableName(), null, null), principalName, principalType, - pgi)); - } + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.TABLE, table.getDbName(), table.getTableName(), null, null)); } commit = true; return privileges; @@ -2213,19 +2611,82 @@ public void dropPartitions(String dbName, String tblName, List partNames @Override public List listPrincipalPartitionGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List
tables = getHBase().scanTables(null, null); + for (Table table : tables) { + List partitions = getHBase().scanPartitionsInTable(table.getDbName(), + table.getTableName(), Integer.MAX_VALUE); + + for (Partition partition : partitions) { + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps == null) continue; + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.PARTITION, table.getDbName(), table.getTableName(), + partition.getValues(), null)); + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override public List listPrincipalTableColumnGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + List> columnPps = + getHBase().scanColumnPrivilege(ColumnPrivilegeType.TABLE); + for (ObjectPair pair : columnPps) { + privileges.addAll(convertToHiveObjectPrivileges(pair.getSecond(), principalName, + principalType, HiveObjectType.COLUMN, pair.getFirst()[0], pair.getFirst()[1], + null, pair.getFirst()[3])); + } + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override public List listPrincipalPartitionColumnGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + List> columnPps = + getHBase().scanColumnPrivilege(ColumnPrivilegeType.PARTITION); + for (ObjectPair pair : columnPps) { + privileges.addAll(convertToHiveObjectPrivileges(pair.getSecond(), principalName, + principalType, HiveObjectType.COLUMN, pair.getFirst()[0], pair.getFirst()[1], + Warehouse.getPartValuesFromPartName(pair.getFirst()[2]), pair.getFirst()[3])); + } + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2236,16 +2697,28 @@ public void dropPartitions(String dbName, String tblName, List partNames try { PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.USER, pgi)); + if (pps.getUserPrivileges() != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.USER, pgi)); + } } } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + if (pps.getGroupPrivileges() != null) { + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } 
+ } + } + if (pps.getRolePrivileges() != null) { + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } } } } @@ -2273,6 +2746,12 @@ public void dropPartitions(String dbName, String tblName, List partNames null, null, null), e.getKey(), PrincipalType.USER, pgi)); } } + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, + null, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { for (PrivilegeGrantInfo pgi : e.getValue()) { privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, @@ -2293,7 +2772,31 @@ public void dropPartitions(String dbName, String tblName, List partNames public List listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) { - return new ArrayList(); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, + partitionName, columnName); + privileges.addAll(convertToHiveObjectPrivileges(pps, null, null, + HiveObjectType.COLUMN, dbName, tableName, + Warehouse.getPartValuesFromPartName(partitionName), columnName)); + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } + finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2302,19 +2805,33 @@ public void dropPartitions(String dbName, String tblName, List partNames boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); Table table = getHBase().getTable(dbName, tableName); PrincipalPrivilegeSet pps = table.getPrivileges(); if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); + if (pps.getUserPrivileges() != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); + } } } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + if (pps.getGroupPrivileges() != null) { + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } + } + if 
(pps.getRolePrivileges() != null) { + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } } } } @@ -2330,13 +2847,74 @@ public void dropPartitions(String dbName, String tblName, List partNames @Override public List listPartitionGrantsAll(String dbName, String tableName, String partitionName) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + Table table = getHBase().getTable(dbName, tableName); + List partVals = Warehouse.getPartValuesFromPartName(partitionName); + List partitions = getHBase().scanPartitionsInTable(table.getDbName(), + table.getTableName(), Integer.MAX_VALUE); + for (Partition partition : partitions) { + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.USER, pgi)); + } + } + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.ROLE, pgi)); + } + } + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override public List listTableColumnGrantsAll(String dbName, String tableName, String columnName) { - return new ArrayList(); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, + null, columnName); + privileges.addAll(convertToHiveObjectPrivileges(pps, null, null, + HiveObjectType.COLUMN, dbName, tableName, null, columnName)); + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2438,22 +3016,20 @@ public Function getFunction(String dbName, String funcName) throws MetaException @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - throw new UnsupportedOperationException(); + return new NotificationEventResponse(); } @Override public void addNotificationEvent(NotificationEvent event) { - throw new UnsupportedOperationException(); } @Override public void cleanNotificationEvents(int olderThan) { - throw new UnsupportedOperationException(); } @Override public CurrentNotificationEventId 
getCurrentNotificationEventId() { - throw new UnsupportedOperationException(); + return new CurrentNotificationEventId(0); } @Override @@ -2528,8 +3104,8 @@ private String buildExternalPartName(Table table, Partition part) { } private String buildExternalPartName(String dbName, String tableName, List partVals) - throws MetaException { - return buildExternalPartName(getTable(dbName, tableName), partVals); + throws MetaException, IOException { + return buildExternalPartName(getHBase().getTable(dbName, tableName), partVals); } private Set findUsersToRemapRolesFor(Role role, String principalName, PrincipalType type) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index 54daa4a..3438133 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -38,6 +38,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; @@ -45,6 +46,8 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Date; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; @@ -179,6 +182,7 @@ static HbaseMetastoreProto.PrincipalType convertPrincipalTypes(PrincipalType type) { switch (type) { case USER: return HbaseMetastoreProto.PrincipalType.USER; + case GROUP: return HbaseMetastoreProto.PrincipalType.GROUP; case ROLE: return HbaseMetastoreProto.PrincipalType.ROLE; default: throw new RuntimeException("Unknown principal type " + type.toString()); } @@ -192,6 +196,7 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType type) { switch (type) { case USER: return PrincipalType.USER; + case GROUP: return PrincipalType.GROUP; case ROLE: return PrincipalType.ROLE; default: throw new RuntimeException("Unknown principal type " + type.toString()); } @@ -230,6 +235,9 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ if (pps.getUserPrivileges() != null) { builder.addAllUsers(buildPrincipalPrivilegeSetEntry(pps.getUserPrivileges())); } + if (pps.getGroupPrivileges() != null) { + builder.addAllGroups(buildPrincipalPrivilegeSetEntry(pps.getGroupPrivileges())); + } if (pps.getRolePrivileges() != null) { builder.addAllRoles(buildPrincipalPrivilegeSetEntry(pps.getRolePrivileges())); } @@ -239,11 +247,15 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { PrincipalPrivilegeSet pps = null; - if (!proto.getUsersList().isEmpty() || !proto.getRolesList().isEmpty()) { + if (!proto.getUsersList().isEmpty() || !proto.getGroupsList().isEmpty() + || !proto.getRolesList().isEmpty()) { pps = new 
PrincipalPrivilegeSet(); if (!proto.getUsersList().isEmpty()) { pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList())); } + if (!proto.getGroupsList().isEmpty()) { + pps.setGroupPrivileges(convertPrincipalPrivilegeSetEntries(proto.getGroupsList())); + } if (!proto.getRolesList().isEmpty()) { pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList())); } @@ -649,11 +661,11 @@ private static ResourceType convertResourceTypes( } } if (sd.getBucketCols() != null) { - SortedSet bucketCols = new TreeSet<>(sd.getBucketCols()); + List bucketCols = new ArrayList<>(sd.getBucketCols()); for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); } if (sd.getSortCols() != null) { - SortedSet orders = new TreeSet<>(sd.getSortCols()); + List orders = new ArrayList<>(sd.getSortCols()); for (Order order : orders) { md.update(order.getCol().getBytes(ENCODING)); md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); @@ -685,7 +697,7 @@ private static ResourceType convertResourceTypes( } } } - + md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); return md.digest(); } @@ -758,43 +770,64 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) * @param sdHash hash that is being used as a key for the enclosed storage descriptor * @return First element is the key, second is the serialized partition */ - static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash) { + static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash, + String defaultPartitionName) { byte[][] result = new byte[2][]; - result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues()); + result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues(), + defaultPartitionName); HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder(); builder .setCreateTime(part.getCreateTime()) .setLastAccessTime(part.getLastAccessTime()); - if (part.getSd().getLocation() != null) builder.setLocation(part.getSd().getLocation()); - if (part.getSd().getParameters() != null) { + if (part.getSd() != null && part.getSd().getLocation() != null) { + builder.setLocation(part.getSd().getLocation()); + } + if (part.getSd() != null && part.getSd().getParameters() != null) { builder.setSdParameters(buildParameters(part.getSd().getParameters())); } - builder.setSdHash(ByteString.copyFrom(sdHash)); + if (sdHash != null) { + builder.setSdHash(ByteString.copyFrom(sdHash)); + } + if (part.getPrivileges() != null) { + builder.setPrivileges(buildPrincipalPrivilegeSet(part.getPrivileges())); + } if (part.getParameters() != null) builder.setParameters(buildParameters(part.getParameters())); result[1] = builder.build().toByteArray(); return result; } - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals) { - return buildPartitionKey(dbName, tableName, partTypes, partVals, false); + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, + String defaultPartitionName) { + return buildPartitionKey(dbName, tableName, partTypes, partVals, false, defaultPartitionName); } - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix) { + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix, + String 
defaultPartitionName) { Object[] components = new Object[partVals.size()]; + boolean lastIsDefault = false; for (int i=0;i partTypes, Object[] components, boolean endPrefix) { + static byte[] buildSerializedPartitionKey(String dbName, String tableName, List partTypes, + Object[] components, boolean endPrefix, boolean lastIsDefault) { ObjectInspector javaStringOI = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING); Object[] data = new Object[components.length+2]; @@ -818,7 +851,7 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) } Output output = new Output(); try { - BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix); + BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix, lastIsDefault); } catch (SerDeException e) { throw new RuntimeException("Cannot serialize partition " + StringUtils.join(components, ",")); } @@ -835,10 +868,13 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) } static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setLocation(parts.location); - ssd.setParameters(parts.parameters); - ssd.setShared(sd); + SharedStorageDescriptor ssd = null; + if (sd != null) { + ssd = new SharedStorageDescriptor(); + ssd.setLocation(parts.location); + ssd.setParameters(parts.parameters); + ssd.setShared(sd); + } if (parts.containingPartition != null) { parts.containingPartition.setSd(ssd); } else if (parts.containingTable != null) { @@ -939,8 +975,9 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa part.setLastAccessTime((int)proto.getLastAccessTime()); if (proto.hasLocation()) sdParts.location = proto.getLocation(); if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); - sdParts.sdHash = proto.getSdHash().toByteArray(); + if (proto.hasSdHash()) sdParts.sdHash = proto.getSdHash().toByteArray(); if (proto.hasParameters()) part.setParameters(buildParameters(proto.getParameters())); + if (proto.hasPrivileges()) part.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); return sdParts; } @@ -1045,7 +1082,9 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getSd().getParameters() != null) { builder.setSdParameters(buildParameters(table.getSd().getParameters())); } - builder.setSdHash(ByteString.copyFrom(sdHash)); + if (sdHash != null) { + builder.setSdHash(ByteString.copyFrom(sdHash)); + } if (table.getPartitionKeys() != null) { builder.addAllPartitionKeys(convertFieldSchemaListToProto(table.getPartitionKeys())); } @@ -1058,7 +1097,21 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getViewExpandedText() != null) { builder.setViewExpandedText(table.getViewExpandedText()); } - if (table.getTableType() != null) builder.setTableType(table.getTableType()); + // If the table has property EXTERNAL set, update table type + // accordingly + String tableType = table.getTableType(); + boolean isExternal = "TRUE".equals(table.getParameters().get("EXTERNAL")); + if (TableType.MANAGED_TABLE.toString().equals(tableType)) { + if (isExternal) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } + } + if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { + if (!isExternal) { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + if (tableType != null) 
builder.setTableType(tableType); if (table.getPrivileges() != null) { builder.setPrivileges(buildPrincipalPrivilegeSet(table.getPrivileges())); } @@ -1106,12 +1159,23 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, table.setRetention((int)proto.getRetention()); if (proto.hasLocation()) sdParts.location = proto.getLocation(); if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); - sdParts.sdHash = proto.getSdHash().toByteArray(); + if (proto.hasSdHash()) sdParts.sdHash = proto.getSdHash().toByteArray(); table.setPartitionKeys(convertFieldSchemaListFromProto(proto.getPartitionKeysList())); table.setParameters(buildParameters(proto.getParameters())); if (proto.hasViewOriginalText()) table.setViewOriginalText(proto.getViewOriginalText()); if (proto.hasViewExpandedText()) table.setViewExpandedText(proto.getViewExpandedText()); - table.setTableType(proto.getTableType()); + String tableType = proto.getTableType(); + if (tableType == null) { + // for backwards compatibility with old metastore persistence + if (table.getViewOriginalText() != null) { + tableType = TableType.VIRTUAL_VIEW.toString(); + } else if ("TRUE".equals(table.getParameters().get("EXTERNAL"))) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } else { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + table.setTableType(tableType); if (proto.hasPrivileges()) { table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); } @@ -1229,6 +1293,151 @@ static StorageDescriptorParts deserializeIndex(String dbName, String origTableNa return proto.toByteArray(); } + static ColumnStatisticsObj mergeColumnStatisticsForOneColumn(ColumnStatisticsObj orig, + ColumnStatisticsObj updated) { + ColumnStatisticsData origColData = orig.getStatsData(); + ColumnStatisticsData updatedColData = updated.getStatsData(); + switch (updatedColData.getSetField()) { + case BOOLEAN_STATS: + BooleanColumnStatsData origBoolData = origColData.getBooleanStats(); + BooleanColumnStatsData updatedBoolData = updatedColData.getBooleanStats(); + if (updatedBoolData.isSetNumNulls()) { + origBoolData.setNumNulls(updatedBoolData.getNumNulls()); + } + if (updatedBoolData.isSetBitVectors()) { + origBoolData.setBitVectors(updatedBoolData.getBitVectors()); + } + if (updatedBoolData.isSetNumTrues()) { + origBoolData.setNumTrues(updatedBoolData.getNumTrues()); + } + if (updatedBoolData.isSetNumFalses()) { + origBoolData.setNumFalses(updatedBoolData.getNumFalses()); + } + break; + + case LONG_STATS: + LongColumnStatsData origLongData = origColData.getLongStats(); + LongColumnStatsData updatedLongData = updatedColData.getLongStats(); + if (updatedLongData.isSetNumNulls()) { + origLongData.setNumNulls(updatedLongData.getNumNulls()); + } + if (updatedLongData.isSetNumDVs()) { + origLongData.setNumDVs(updatedLongData.getNumDVs()); + } + if (updatedLongData.isSetBitVectors()) { + origLongData.setBitVectors(updatedLongData.getBitVectors()); + } + if (updatedLongData.isSetLowValue()) { + origLongData.setLowValue(updatedLongData.getLowValue()); + } + if (updatedLongData.isSetHighValue()) { + origLongData.setHighValue(updatedLongData.getHighValue()); + } + break; + + case DOUBLE_STATS: + DoubleColumnStatsData origDoubleData = origColData.getDoubleStats(); + DoubleColumnStatsData updatedDoubleData = updatedColData.getDoubleStats(); + if (updatedDoubleData.isSetNumNulls()) { + origDoubleData.setNumNulls(updatedDoubleData.getNumNulls()); + } + if (updatedDoubleData.isSetNumDVs()) { + 
origDoubleData.setNumDVs(updatedDoubleData.getNumDVs()); + } + if (updatedDoubleData.isSetBitVectors()) { + origDoubleData.setBitVectors(updatedDoubleData.getBitVectors()); + } + if (updatedDoubleData.isSetLowValue()) { + origDoubleData.setLowValue(updatedDoubleData.getLowValue()); + } + if (updatedDoubleData.isSetHighValue()) { + origDoubleData.setHighValue(updatedDoubleData.getHighValue()); + } + break; + + case STRING_STATS: + StringColumnStatsData origStringData = origColData.getStringStats(); + StringColumnStatsData updatedStringData = updatedColData.getStringStats(); + if (updatedStringData.isSetNumNulls()) { + origStringData.setNumNulls(updatedStringData.getNumNulls()); + } + if (updatedStringData.isSetNumDVs()) { + origStringData.setNumDVs(updatedStringData.getNumDVs()); + } + if (updatedStringData.isSetBitVectors()) { + origStringData.setBitVectors(updatedStringData.getBitVectors()); + } + if (updatedStringData.isSetMaxColLen()) { + origStringData.setMaxColLen(updatedStringData.getMaxColLen()); + } + if (updatedStringData.isSetAvgColLen()) { + origStringData.setAvgColLen(updatedStringData.getAvgColLen()); + } + break; + + case BINARY_STATS: + BinaryColumnStatsData origBinaryData = origColData.getBinaryStats(); + BinaryColumnStatsData updatedBinaryData = updatedColData.getBinaryStats(); + if (updatedBinaryData.isSetNumNulls()) { + origBinaryData.setNumNulls(updatedBinaryData.getNumNulls()); + } + if (updatedBinaryData.isSetBitVectors()) { + origBinaryData.setBitVectors(updatedBinaryData.getBitVectors()); + } + if (updatedBinaryData.isSetMaxColLen()) { + origBinaryData.setMaxColLen(updatedBinaryData.getMaxColLen()); + } + if (updatedBinaryData.isSetAvgColLen()) { + origBinaryData.setAvgColLen(updatedBinaryData.getAvgColLen()); + } + break; + + case DECIMAL_STATS: + DecimalColumnStatsData origDecimalData = origColData.getDecimalStats(); + DecimalColumnStatsData updatedDecimalData = updatedColData.getDecimalStats(); + if (updatedDecimalData.isSetNumNulls()) { + origDecimalData.setNumNulls(updatedDecimalData.getNumNulls()); + } + if (updatedDecimalData.isSetNumDVs()) { + origDecimalData.setNumDVs(updatedDecimalData.getNumDVs()); + } + if (updatedDecimalData.isSetBitVectors()) { + origDecimalData.setBitVectors(updatedDecimalData.getBitVectors()); + } + if (updatedDecimalData.isSetLowValue()) { + origDecimalData.setLowValue(updatedDecimalData.getLowValue()); + } + if (updatedDecimalData.isSetHighValue()) { + origDecimalData.setHighValue(updatedDecimalData.getHighValue()); + } + break; + + case DATE_STATS: + DateColumnStatsData origDateData = origColData.getDateStats(); + DateColumnStatsData updatedDateData = updatedColData.getDateStats(); + if (updatedDateData.isSetNumNulls()) { + origDateData.setNumNulls(updatedDateData.getNumNulls()); + } + if (updatedDateData.isSetNumDVs()) { + origDateData.setNumDVs(updatedDateData.getNumDVs()); + } + if (updatedDateData.isSetBitVectors()) { + origDateData.setBitVectors(updatedDateData.getBitVectors()); + } + if (updatedDateData.isSetLowValue()) { + origDateData.setLowValue(updatedDateData.getLowValue()); + } + if (updatedDateData.isSetHighValue()) { + origDateData.setHighValue(updatedDateData.getHighValue()); + } + break; + + default: + throw new RuntimeException("Woh, bad. 
Unknown stats type!"); + } + return orig; + } + private static HbaseMetastoreProto.ColumnStats protoBufStatsForOneColumn( ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) throws IOException { HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); @@ -1244,78 +1453,162 @@ static StorageDescriptorParts deserializeIndex(String dbName, String origTableNa switch (colData.getSetField()) { case BOOLEAN_STATS: BooleanColumnStatsData boolData = colData.getBooleanStats(); - builder.setNumNulls(boolData.getNumNulls()); - builder.setBoolStats(HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() - .setNumTrues(boolData.getNumTrues()).setNumFalses(boolData.getNumFalses()).build()); + if (boolData.isSetNumNulls()) { + builder.setNumNulls(boolData.getNumNulls()); + } + if (boolData.isSetBitVectors()) { + builder.setBitVectors(boolData.getBitVectors()); + } + HbaseMetastoreProto.ColumnStats.BooleanStats.Builder boolBuilder = + HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder(); + if (boolData.isSetNumTrues()) { + boolBuilder.setNumTrues(boolData.getNumTrues()); + } + if (boolData.isSetNumFalses()) { + boolBuilder.setNumFalses(boolData.getNumFalses()); + } + builder.setBoolStats(boolBuilder.build()); break; case LONG_STATS: LongColumnStatsData longData = colData.getLongStats(); - builder.setNumNulls(longData.getNumNulls()); - builder.setNumDistinctValues(longData.getNumDVs()); + if (longData.isSetNumNulls()) { + builder.setNumNulls(longData.getNumNulls()); + } + if (longData.isSetNumDVs()) { + builder.setNumDistinctValues(longData.getNumDVs()); + } if (longData.isSetBitVectors()) { builder.setBitVectors(longData.getBitVectors()); } - builder.setLongStats(HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() - .setLowValue(longData.getLowValue()).setHighValue(longData.getHighValue()).build()); + HbaseMetastoreProto.ColumnStats.LongStats.Builder longBuilder = + HbaseMetastoreProto.ColumnStats.LongStats.newBuilder(); + if (longData.isSetLowValue()) { + longBuilder.setLowValue(longData.getLowValue()); + } + if (longData.isSetHighValue()) { + longBuilder.setHighValue(longData.getHighValue()); + } + builder.setLongStats(longBuilder.build()); break; case DOUBLE_STATS: DoubleColumnStatsData doubleData = colData.getDoubleStats(); - builder.setNumNulls(doubleData.getNumNulls()); - builder.setNumDistinctValues(doubleData.getNumDVs()); + if (doubleData.isSetNumNulls()) { + builder.setNumNulls(doubleData.getNumNulls()); + } + if (doubleData.isSetNumDVs()) { + builder.setNumDistinctValues(doubleData.getNumDVs()); + } if (doubleData.isSetBitVectors()) { builder.setBitVectors(doubleData.getBitVectors()); } - builder.setDoubleStats(HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() - .setLowValue(doubleData.getLowValue()).setHighValue(doubleData.getHighValue()).build()); + HbaseMetastoreProto.ColumnStats.DoubleStats.Builder doubleBuilder = + HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder(); + if (doubleData.isSetLowValue()) { + doubleBuilder.setLowValue(doubleData.getLowValue()); + } + if (doubleData.isSetHighValue()) { + doubleBuilder.setHighValue(doubleData.getHighValue()); + } + builder.setDoubleStats(doubleBuilder.build()); break; case STRING_STATS: StringColumnStatsData stringData = colData.getStringStats(); - builder.setNumNulls(stringData.getNumNulls()); - builder.setNumDistinctValues(stringData.getNumDVs()); + if (stringData.isSetNumNulls()) { + builder.setNumNulls(stringData.getNumNulls()); + } + if (stringData.isSetNumDVs()) { 
+ builder.setNumDistinctValues(stringData.getNumDVs()); + } if (stringData.isSetBitVectors()) { builder.setBitVectors(stringData.getBitVectors()); } - builder.setStringStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - .setMaxColLength(stringData.getMaxColLen()).setAvgColLength(stringData.getAvgColLen()) - .build()); + HbaseMetastoreProto.ColumnStats.StringStats.Builder stringBuilder = + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(); + if (stringData.isSetMaxColLen()) { + stringBuilder.setMaxColLength(stringData.getMaxColLen()); + } + if (stringData.isSetAvgColLen()) { + stringBuilder.setAvgColLength(stringData.getAvgColLen()); + } + builder.setStringStats(stringBuilder.build()); break; case BINARY_STATS: BinaryColumnStatsData binaryData = colData.getBinaryStats(); - builder.setNumNulls(binaryData.getNumNulls()); - builder.setBinaryStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - .setMaxColLength(binaryData.getMaxColLen()).setAvgColLength(binaryData.getAvgColLen()) - .build()); + if (binaryData.isSetNumNulls()) { + builder.setNumNulls(binaryData.getNumNulls()); + } + HbaseMetastoreProto.ColumnStats.StringStats.Builder binaryBuilder = + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(); + if (binaryData.isSetMaxColLen()) { + binaryBuilder.setMaxColLength(binaryData.getMaxColLen()); + } + if (binaryData.isSetAvgColLen()) { + binaryBuilder.setAvgColLength(binaryData.getAvgColLen()); + } + builder.setBinaryStats(binaryBuilder.build()); break; case DECIMAL_STATS: DecimalColumnStatsData decimalData = colData.getDecimalStats(); - builder.setNumNulls(decimalData.getNumNulls()); - builder.setNumDistinctValues(decimalData.getNumDVs()); + if (decimalData.isSetNumNulls()) { + builder.setNumNulls(decimalData.getNumNulls()); + } + if (decimalData.isSetNumDVs()) { + builder.setNumDistinctValues(decimalData.getNumDVs()); + } if (decimalData.isSetBitVectors()) { builder.setBitVectors(decimalData.getBitVectors()); } if (decimalData.getLowValue() != null && decimalData.getHighValue() != null) { - builder.setDecimalStats( - HbaseMetastoreProto.ColumnStats.DecimalStats - .newBuilder() - .setLowValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) - .setScale(decimalData.getLowValue().getScale()).build()) - .setHighValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) - .setScale(decimalData.getHighValue().getScale()).build())).build(); + HbaseMetastoreProto.ColumnStats.DecimalStats.Builder decimalBuilder = + HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder(); + if (decimalData.isSetLowValue()) { + decimalBuilder.setLowValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) + .setScale(decimalData.getLowValue().getScale()).build()); + } + if (decimalData.isSetHighValue()) { + decimalBuilder.setHighValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) + .setScale(decimalData.getHighValue().getScale()).build()); + } + builder.setDecimalStats(decimalBuilder.build()); } else { builder.setDecimalStats(HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder().clear() .build()); } break; + case DATE_STATS: + DateColumnStatsData dateData = colData.getDateStats(); + if 
(dateData.isSetNumNulls()) { + builder.setNumNulls(dateData.getNumNulls()); + } + if (dateData.isSetNumDVs()) { + builder.setNumDistinctValues(dateData.getNumDVs()); + } + if (dateData.isSetBitVectors()) { + builder.setBitVectors(dateData.getBitVectors()); + } + HbaseMetastoreProto.ColumnStats.DateStats.Builder dateBuilder = + HbaseMetastoreProto.ColumnStats.DateStats.newBuilder(); + if (dateData.isSetLowValue()) { + dateBuilder.setLowValue(HbaseMetastoreProto.ColumnStats.Date.newBuilder() + .setDaysSinceEpoch(dateData.getLowValue().getDaysSinceEpoch())); + } + if (dateData.isSetHighValue()) { + dateBuilder.setHighValue(HbaseMetastoreProto.ColumnStats.Date.newBuilder() + .setDaysSinceEpoch(dateData.getHighValue().getDaysSinceEpoch())); + } + builder.setDateStats(dateBuilder.build()); + break; + default: throw new RuntimeException("Woh, bad. Unknown stats type!"); } @@ -1408,6 +1701,22 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partiti decimalData.setNumDVs(proto.getNumDistinctValues()); decimalData.setBitVectors(proto.getBitVectors()); colData.setDecimalStats(decimalData); + } else if (proto.hasDateStats()) { + DateColumnStatsData dateData = new DateColumnStatsData(); + if (proto.getDateStats().hasLowValue()) { + Date loVal = new Date(); + loVal.setDaysSinceEpoch(proto.getDateStats().getLowValue().getDaysSinceEpoch()); + dateData.setLowValue(loVal); + } + if (proto.getDateStats().hasHighValue()) { + Date hiVal = new Date(); + hiVal.setDaysSinceEpoch(proto.getDateStats().getHighValue().getDaysSinceEpoch()); + dateData.setHighValue(hiVal); + } + dateData.setNumNulls(proto.getNumNulls()); + dateData.setNumDVs(proto.getNumDistinctValues()); + dateData.setBitVectors(proto.getBitVectors()); + colData.setDateStats(dateData); } else { throw new RuntimeException("Woh, bad. Unknown stats type!"); } @@ -1525,4 +1834,26 @@ static String deserializeMasterKey(byte[] value) throws InvalidProtocolBufferExc public static double getDoubleValue(Decimal decimal) { return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); } + + static List
getSharedTablesNoPriv(List
tables) { + List
sharedTables = new ArrayList
(); + for (Table t : tables) { + SharedTable sTable = new SharedTable(); + sTable.setShared(t); + sTable.unsetPrivileges(); + sharedTables.add(sTable); + } + return sharedTables; + } + + static List getSharedPartitionsNoPriv(List parts) { + List sharedParts = new ArrayList(); + for (Partition part : parts) { + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + sPart.unsetPrivileges(); + sharedParts.add(sPart); + } + return sharedParts; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java index 2b0863d..323005b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java @@ -264,7 +264,8 @@ public int compareTo(byte[] value, int offset, int length) { NativeOperator nativeOp = nativeOps.get(i); switch (op.type) { case LIKE: - if (!deserializedkeys.get(nativeOp.pos).toString().matches(op.val)) { + if (!(deserializedkeys.get(nativeOp.pos)!=null?deserializedkeys.get(nativeOp.pos):"") + .toString().matches(op.val)) { if (LOG.isDebugEnabled()) { LOG.debug("Fail to match operator " + op.keyName + "(" + deserializedkeys.get(nativeOp.pos) + ") LIKE " + nativeOp.val); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java index 18f8afc..daefbb9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java @@ -91,12 +91,12 @@ public AggrStats load(StatsCacheKey key) throws Exception { if (aggrStats == null) { misses.incr(); ColumnStatsAggregator aggregator = null; - aggrStats = new AggrStats(); LOG.debug("Unable to find aggregated stats for " + key.colName + ", aggregating"); List css = hrw.getPartitionStatistics(key.dbName, key.tableName, key.partNames, HBaseStore.partNameListToValsList(key.partNames), Collections.singletonList(key.colName)); if (css != null && css.size() > 0) { + aggrStats = new AggrStats(); aggrStats.setPartsFound(css.size()); if (aggregator == null) { aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css.iterator() diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto index 6fbe36c..c49a7cf 100644 --- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -19,7 +19,8 @@ package org.apache.hadoop.hive.metastore.hbase; enum PrincipalType { USER = 0; - ROLE = 1; + GROUP = 1; + ROLE = 2; } message AggrStats { @@ -68,6 +69,15 @@ message ColumnStats { optional double high_value = 2; } + message Date { + required int64 daysSinceEpoch = 1; + } + + message DateStats { + optional Date low_value = 1; + optional Date high_value = 2; + } + message StringStats { optional int64 max_col_length = 1; optional double avg_col_length = 2; @@ -92,8 +102,9 @@ message ColumnStats { optional StringStats string_stats = 8; optional StringStats binary_stats = 9; optional DecimalStats decimal_stats = 10; - optional string column_name = 11; - optional string bit_vectors = 12; + optional DateStats date_stats = 11; + optional 
diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
index 6fbe36c..c49a7cf 100644
--- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
+++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
@@ -19,7 +19,8 @@ package org.apache.hadoop.hive.metastore.hbase;
 
 enum PrincipalType {
   USER = 0;
-  ROLE = 1;
+  GROUP = 1;
+  ROLE = 2;
 }
 
 message AggrStats {
@@ -68,6 +69,15 @@ message ColumnStats {
     optional double high_value = 2;
   }
 
+  message Date {
+    required int64 daysSinceEpoch = 1;
+  }
+
+  message DateStats {
+    optional Date low_value = 1;
+    optional Date high_value = 2;
+  }
+
   message StringStats {
     optional int64 max_col_length = 1;
     optional double avg_col_length = 2;
@@ -92,8 +102,9 @@ message ColumnStats {
   optional StringStats string_stats = 8;
   optional StringStats binary_stats = 9;
   optional DecimalStats decimal_stats = 10;
-  optional string column_name = 11;
-  optional string bit_vectors = 12;
+  optional DateStats date_stats = 11;
+  optional string column_name = 12;
+  optional string bit_vectors = 13;
 }
 
 message Database {
@@ -156,9 +167,9 @@ message Partition {
   optional int64 last_access_time = 2;
   optional string location = 3;
   optional Parameters sd_parameters = 4; // storage descriptor parameters
-  required bytes sd_hash = 5;
+  optional bytes sd_hash = 5;
   optional Parameters parameters = 6; // partition parameters
-  // We don't support partition level privileges
+  optional PrincipalPrivilegeSet privileges = 7;
 }
 
 message PrincipalPrivilegeSetEntry {
@@ -168,7 +179,8 @@ message PrincipalPrivilegeSetEntry {
 
 message PrincipalPrivilegeSet {
   repeated PrincipalPrivilegeSetEntry users = 1;
-  repeated PrincipalPrivilegeSetEntry roles = 2;
+  repeated PrincipalPrivilegeSetEntry groups = 2;
+  repeated PrincipalPrivilegeSetEntry roles = 3;
 }
 
 message PrivilegeGrantInfo {
@@ -247,7 +259,7 @@ message Table {
   optional int64 retention = 4;
   optional string location = 5;
   optional Parameters sd_parameters = 6; // storage descriptor parameters
-  required bytes sd_hash = 7;
+  optional bytes sd_hash = 7;
   repeated FieldSchema partition_keys = 8;
   optional Parameters parameters = 9;
   optional string view_original_text = 10;
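Note on the proto change above: inserting GROUP = 1 shifts ROLE from tag 1 to tag 2, and column_name/bit_vectors move from tags 11/12 to 12/13 to make room for date_stats. Enum values and field numbers are the on-disk wire identity in protobuf, so any rows written with the old numbering would decode differently under the new schema. The plain-Java illustration below is not part of the patch and uses hypothetical tag maps purely to show the consequence.

import java.util.HashMap;
import java.util.Map;

public class EnumRenumberSketch {
  public static void main(String[] args) {
    // Old and new wire-tag assignments for PrincipalType, as changed in the hunk above.
    Map<Integer, String> oldTags = new HashMap<>();
    oldTags.put(0, "USER");
    oldTags.put(1, "ROLE");

    Map<Integer, String> newTags = new HashMap<>();
    newTags.put(0, "USER");
    newTags.put(1, "GROUP");
    newTags.put(2, "ROLE");

    // A ROLE written under the old schema (tag 1) reads back as GROUP under the new one,
    // which is only safe if no data with the old numbering needs to survive the upgrade.
    int storedTag = 1;
    System.out.println("old=" + oldTags.get(storedTag) + ", new=" + newTags.get(storedTag));
  }
}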
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java
index 06884b3..25415a2 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -44,6 +45,7 @@ public class TestHBaseFilterPlanUtil {
   final boolean INCLUSIVE = true;
+  final String defaultPartitionName = HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal;
 
   /**
    * Test the function that compares byte arrays
@@ -89,10 +91,10 @@ public void testgetComparedMarker() {
     l = new ScanMarker("1", !INCLUSIVE, "int");
     // the rule for null vs non-null is different
     // non-null is both smaller and greater than null
-    Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true));
-    Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true));
-    Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false));
-    Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false));
+    Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true, defaultPartitionName));
+    Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true, defaultPartitionName));
+    Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false, defaultPartitionName));
+    Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false, defaultPartitionName));
 
     // create l that is greater because of the bytes
     l = new ScanMarker("2", INCLUSIVE, "int");
@@ -102,10 +104,10 @@ public void testgetComparedMarker() {
   }
 
   private void assertFirstGreater(ScanMarker big, ScanMarker small) {
-    Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true));
-    Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true));
-    Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false));
-    Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false));
+    Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true, defaultPartitionName));
+    Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true, defaultPartitionName));
+    Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false, defaultPartitionName));
+    Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false, defaultPartitionName));
   }
 
   /**
@@ -113,8 +115,8 @@ private void assertFirstGreater(ScanMarker big, ScanMarker small) {
    */
   @Test
   public void testScanPlanAnd() {
-    ScanPlan l = new ScanPlan();
-    ScanPlan r = new ScanPlan();
+    ScanPlan l = new ScanPlan(defaultPartitionName);
+    ScanPlan r = new ScanPlan(defaultPartitionName);
 
     l.setStartMarker("a", "int", "10", INCLUSIVE);
     r.setStartMarker("a", "int", "10", INCLUSIVE);
@@ -147,8 +149,8 @@ public void testScanPlanAnd() {
    */
   @Test
   public void testScanPlanOr() {
-    ScanPlan l = new ScanPlan();
-    ScanPlan r = new ScanPlan();
+    ScanPlan l = new ScanPlan(defaultPartitionName);
+    ScanPlan r = new ScanPlan(defaultPartitionName);
 
     l.setStartMarker("a", "int", "1", INCLUSIVE);
     r.setStartMarker("a", "int", "11", INCLUSIVE);
@@ -167,17 +169,17 @@
 
   @Test
   public void testMultiScanPlanOr() {
-    MultiScanPlan l = createMultiScanPlan(new ScanPlan());
-    MultiScanPlan r = createMultiScanPlan(new ScanPlan());
+    MultiScanPlan l = createMultiScanPlan(new ScanPlan(defaultPartitionName));
+    MultiScanPlan r = createMultiScanPlan(new ScanPlan(defaultPartitionName));
 
     // verify OR of two multi plans with one plan each
     Assert.assertEquals(2, l.or(r).getPlans().size());
 
     // verify OR of multi plan with a single scanplan
-    Assert.assertEquals(2, l.or(new ScanPlan()).getPlans().size());
-    Assert.assertEquals(2, (new ScanPlan()).or(l).getPlans().size());
+    Assert.assertEquals(2, l.or(new ScanPlan(defaultPartitionName)).getPlans().size());
+    Assert.assertEquals(2, (new ScanPlan(defaultPartitionName)).or(l).getPlans().size());
 
     // verify or of two multiplans with more than one scan plan
-    r = createMultiScanPlan(new ScanPlan(), new ScanPlan());
+    r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName));
     Assert.assertEquals(3, l.or(r).getPlans().size());
     Assert.assertEquals(3, r.or(l).getPlans().size());
@@ -192,21 +194,22 @@ private MultiScanPlan createMultiScanPlan(ScanPlan... scanPlans) {
    */
   @Test
   public void testMultiScanPlanAnd() {
-    MultiScanPlan l = createMultiScanPlan(new ScanPlan());
-    MultiScanPlan r = createMultiScanPlan(new ScanPlan());
+    MultiScanPlan l = createMultiScanPlan(new ScanPlan(defaultPartitionName));
+    MultiScanPlan r = createMultiScanPlan(new ScanPlan(defaultPartitionName));
 
     // two MultiScanPlan with single scan plans should result in new FilterPlan
     // with just one scan
     Assert.assertEquals(1, l.and(r).getPlans().size());
 
     // l has one ScanPlan, r has two. AND result should have two
-    r = createMultiScanPlan(new ScanPlan(), new ScanPlan());
+    r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName));
     Assert.assertEquals(2, l.and(r).getPlans().size());
     Assert.assertEquals(2, r.and(l).getPlans().size());
 
    // l has 2 ScanPlans, r has 3. AND result should have 6
-    l = createMultiScanPlan(new ScanPlan(), new ScanPlan());
-    r = createMultiScanPlan(new ScanPlan(), new ScanPlan(), new ScanPlan());
+    l = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName));
+    r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName),
+        new ScanPlan(defaultPartitionName));
     Assert.assertEquals(6, l.and(r).getPlans().size());
     Assert.assertEquals(6, r.and(l).getPlans().size());
   }
@@ -268,7 +271,7 @@ private void verifyPlan(TreeNode l, List parts, String keyName, Sca
       e = new ExpressionTree();
       e.setRootForTest(l);
     }
-    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
+    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName);
     FilterPlan plan = planRes.plan;
     Assert.assertEquals("Has unsupported condition", hasUnsupportedCondition,
         planRes.hasUnsupportedCondition);
@@ -328,7 +331,7 @@ public void testTreeNodePlan() throws MetaException {
     tn = new TreeNode(l, LogicalOperator.OR, r);
     ExpressionTree e = new ExpressionTree();
     e.setRootForTest(tn);
-    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
+    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName);
     Assert.assertEquals(2, planRes.plan.getPlans().size());
     Assert.assertEquals(false, planRes.hasUnsupportedCondition);
 
@@ -336,7 +339,7 @@ public void testTreeNodePlan() throws MetaException {
     TreeNode tn2 = new TreeNode(l, LogicalOperator.AND, tn);
     e = new ExpressionTree();
     e.setRootForTest(tn2);
-    planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
+    planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName);
     Assert.assertEquals(2, planRes.plan.getPlans().size());
     Assert.assertEquals(false, planRes.hasUnsupportedCondition);
 
@@ -349,7 +352,7 @@ public void testTreeNodePlan() throws MetaException {
     TreeNode tn3 = new TreeNode(tn2, LogicalOperator.OR, klike);
     e = new ExpressionTree();
     e.setRootForTest(tn3);
-    planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
+    planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName);
     Assert.assertEquals(3, planRes.plan.getPlans().size());
     Assert.assertEquals(false, planRes.hasUnsupportedCondition);
 
@@ -365,13 +368,13 @@ public void testPartitionKeyScannerAllString() throws Exception {
     // One prefix key and one minor key range
     ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree;
-    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts);
+    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName);
     Assert.assertEquals(planRes.plan.getPlans().size(), 1);
 
     ScanPlan sp = planRes.plan.getPlans().get(0);
-    byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     RowFilter filter = (RowFilter)sp.getFilter(parts);
 
     // scan range contains the major key year, rowfilter contains minor key state
@@ -387,13 +390,13 @@ public void testPartitionKeyScannerAllString() throws Exception {
     // Two prefix key and one LIKE operator
     exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and month > 10 "
         + "and month <= 11 and state like 'C%'").tree;
-    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts);
+    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName);
     Assert.assertEquals(planRes.plan.getPlans().size(), 1);
 
     sp = planRes.plan.getPlans().get(0);
-    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     filter = (RowFilter)sp.getFilter(parts);
 
     // scan range contains the major key value year/month, rowfilter contains LIKE operator
@@ -409,13 +412,13 @@ public void testPartitionKeyScannerAllString() throws Exception {
     // One prefix key, one minor key range and one LIKE operator
     exprTree = PartFilterExprUtil.getFilterParser("year >= 2014 and month > 10 "
         + "and month <= 11 and state like 'C%'").tree;
-    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts);
+    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName);
     Assert.assertEquals(planRes.plan.getPlans().size(), 1);
 
     sp = planRes.plan.getPlans().get(0);
-    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     filter = (RowFilter)sp.getFilter(parts);
 
     // scan range contains the major key value year (low bound), rowfilter contains minor key state
@@ -431,11 +434,11 @@ public void testPartitionKeyScannerAllString() throws Exception {
     // Condition contains or
     exprTree = PartFilterExprUtil.getFilterParser("year = 2014 and (month > 10 "
        + "or month < 3)").tree;
-    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts);
+    planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName);
     sp = planRes.plan.getPlans().get(0);
 
-    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     filter = (RowFilter)sp.getFilter(parts);
 
     // The first ScanPlan contains year = 2014 and month > 10
@@ -444,8 +447,8 @@ public void testPartitionKeyScannerAllString() throws Exception {
     Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes()));
 
     sp = planRes.plan.getPlans().get(1);
-    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     filter = (RowFilter)sp.getFilter(parts);
 
     // The first ScanPlan contains year = 2014 and month < 3
@@ -463,13 +466,13 @@ public void testPartitionKeyScannerMixedType() throws Exception {
     // One prefix key and one minor key range
     ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree;
-    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts);
+    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName);
     Assert.assertEquals(planRes.plan.getPlans().size(), 1);
 
     ScanPlan sp = planRes.plan.getPlans().get(0);
-    byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts);
-    byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts);
+    byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName);
+    byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName);
     RowFilter filter = (RowFilter)sp.getFilter(parts);
 
     // scan range contains the major key year, rowfilter contains minor key state
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 178a2de..a0a31a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -228,7 +228,8 @@ private void log(String error) {
       List colNames = null;
       if (t != null) {
         destTableName = t.getDbName() + "." + t.getTableName();
-        fieldSchemas = t.getCols();
+        fieldSchemas = new ArrayList();
+        fieldSchemas.addAll(t.getCols());
       } else {
         // Based on the plan outputs, find out the target table name and column names.
         for (WriteEntity output : plan.getOutputs()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index c0edde9..bd5f295 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
@@ -547,12 +548,14 @@ public boolean isStoredAsSubDirectories() {
   }
 
   public List<List<String>> getSkewedColValues(){
-    return tPartition.getSd().getSkewedInfo().getSkewedColValues();
+    return tPartition.getSd().getSkewedInfo()!=null?
+        tPartition.getSd().getSkewedInfo().getSkewedColValues() : new ArrayList<List<String>>();
   }
 
   public List<String> getSkewedColNames() {
     LOG.debug("sd is " + tPartition.getSd().getClass().getName());
-    return tPartition.getSd().getSkewedInfo().getSkewedColNames();
+    return tPartition.getSd().getSkewedInfo()!=null?
+        tPartition.getSd().getSkewedInfo().getSkewedColNames() : new ArrayList<String>();
   }
 
   public void setSkewedValueLocationMap(List<String> valList, String dirName)
@@ -561,6 +564,9 @@ public void setSkewedValueLocationMap(List valList, String dirName)
         .getSkewedColValueLocationMaps();
     if (null == mappings) {
       mappings = new HashMap<List<String>, String>();
+      if (tPartition.getSd().getSkewedInfo()==null) {
+        tPartition.getSd().setSkewedInfo(new SkewedInfo());
+      }
       tPartition.getSd().getSkewedInfo().setSkewedColValueLocationMaps(mappings);
     }
 
@@ -569,7 +575,9 @@ public void setSkewedValueLocationMap(List valList, String dirName)
   }
 
   public Map<List<String>, String> getSkewedColValueLocationMaps() {
-    return tPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps();
+    return tPartition.getSd().getSkewedInfo()!=null?
+        tPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps()
+        : new HashMap<List<String>, String>();
   }
 
   public void checkValidity() throws HiveException {
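Note on the Partition.java hunks above: they guard against a StorageDescriptor whose SkewedInfo was never set, returning empty collections on the read paths and creating the SkewedInfo lazily before the write path attaches location mappings. The sketch below is not part of the patch; it restates the same guards against the real metastore API classes, with hypothetical helper names, so the pattern can be seen outside the diff.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class SkewedInfoGuardSketch {
  // Read path: never dereference a SkewedInfo that was never set; fall back to an empty list.
  static List<String> skewedColNamesOrEmpty(StorageDescriptor sd) {
    return sd.getSkewedInfo() != null
        ? sd.getSkewedInfo().getSkewedColNames() : new ArrayList<String>();
  }

  // Write path: create the SkewedInfo lazily before attaching anything to it.
  static SkewedInfo skewedInfoForWrite(StorageDescriptor sd) {
    if (sd.getSkewedInfo() == null) {
      sd.setSkewedInfo(new SkewedInfo());
    }
    return sd.getSkewedInfo();
  }
}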
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java
index 73e20a8..6e0d5fe 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java
@@ -26,15 +26,21 @@ public class BinarySortableSerDeWithEndPrefix extends BinarySortableSerDe {
 
   public static void serializeStruct(Output byteStream, Object[] fieldData,
-      List fieldOis, boolean endPrefix) throws SerDeException {
+      List fieldOis, boolean endPrefix, boolean lastIsDefault)
+      throws SerDeException {
     for (int i = 0; i < fieldData.length; i++) {
       serialize(byteStream, fieldData[i], fieldOis.get(i), false, ZERO, ONE);
     }
     if (endPrefix) {
-      if (fieldData[fieldData.length-1]!=null) {
-        byteStream.getData()[byteStream.getLength()-1]++;
-      } else {
+      // If the last key is the default partition, we should
+      // only get the null partition key, not other partitions;
+      // on the contrary, if the last key is null, meaning an
+      // open-ended search, we should get all partitions with
+      // the prefix
+      if (fieldData[fieldData.length-1]==null && !lastIsDefault) {
         byteStream.getData()[byteStream.getLength()-1]+=2;
+      } else {
+        byteStream.getData()[byteStream.getLength()-1]++;
       }
     }
   }
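Note on the serde hunk above: the end-prefix logic decides how far the exclusive stop row of the HBase scan extends past the serialized key prefix. Under the assumption that BinarySortableSerDe writes a one-byte null/non-null marker for the last field, bumping the final byte by one keeps the scan just past the exact last value (or just past the null marker when the last key is the default partition), while bumping it by two also covers rows whose last field is non-null, which is what an open-ended prefix search wants. The standalone sketch below is not part of the patch; the byte values are invented for illustration.

import java.util.Arrays;

public class StopRowSketch {
  // Returns a copy of the prefix with its final byte incremented by delta, which is how
  // the hunk above widens (+2) or narrows (+1) the exclusive stop row of the scan.
  static byte[] bumpLastByte(byte[] prefix, int delta) {
    byte[] stop = Arrays.copyOf(prefix, prefix.length);
    stop[stop.length - 1] += delta;
    return stop;
  }

  public static void main(String[] args) {
    byte[] prefix = {0x01, 0x02, 0x00};   // illustrative serialized key prefix ending in a null marker
    // +1: stop right after rows that still carry the null marker (default partition only).
    // +2: stop after rows whose last field sorts past the null marker as well (open-ended search).
    System.out.println(Arrays.toString(bumpLastByte(prefix, 1)));
    System.out.println(Arrays.toString(bumpLastByte(prefix, 2)));
  }
}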