diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 530d2f4..6f77886 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2275,7 +2275,11 @@ public boolean equals(Object obj) {
     private List<Partition> add_partitions_core(final RawStore ms,
         String dbName, String tblName, List<Partition> parts, final boolean ifNotExists)
         throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
-      logInfo("add_partitions");
+      // Added audit log in HIVE-14482
+      for (Partition tmpPart : parts) {
+        startPartitionFunction("add_partitions", dbName, tblName, tmpPart.getValues());
+      }
+
       boolean success = false;
       // Ensures that the list doesn't have dups, and keeps track of directories we have created.
       final Map<PartValEqWrapper, Boolean> addedPartitions =
@@ -2364,6 +2368,8 @@ public Partition call() throws Exception {
             fireMetaStoreAddPartitionEvent(tbl, existingParts, null, false);
           }
         }
+
+        endFunction("add_partitions", success, null, tblName);
       }
       return result;
     }
@@ -2693,8 +2699,10 @@ public Partition add_partition_with_environment_context(
         final Partition part, EnvironmentContext envContext)
         throws InvalidObjectException, AlreadyExistsException,
         MetaException {
-      startTableFunction("add_partition",
-          part.getDbName(), part.getTableName());
+      if (part != null) {
+        startPartitionFunction("add_partition", part.getDbName(), part.getTableName(), part.getValues());
+      }
+
       Partition ret = null;
       Exception ex = null;
       try {
@@ -3017,6 +3025,9 @@ public DropPartitionsResult drop_partitions_req(
           }
         }
 
+        // we now have all the necessary information about the partition, audit the info here
+        startPartitionFunction("drop_partitions_req", dbName, tblName, partNames);
+
         ms.dropPartitions(dbName, tblName, partNames);
         success = ms.commitTransaction();
         DropPartitionsResult result = new DropPartitionsResult();
@@ -3056,6 +3067,8 @@ public DropPartitionsResult drop_partitions_req(
 
             }
           }
         }
+
+        endFunction("drop_partitions_req", success, null, tblName);
       }
     }
@@ -3457,7 +3470,7 @@ private void rename_partition(final String db_name, final String tbl_name,
         final EnvironmentContext envContext)
         throws InvalidOperationException, MetaException, TException {
 
-      startTableFunction("alter_partition", db_name, tbl_name);
+      startPartitionFunction("rename_partition", db_name, tbl_name, new_part.getValues());
 
       if (LOG.isInfoEnabled()) {
         LOG.info("New partition values:" + new_part.getValues());
@@ -3516,7 +3529,7 @@ private void rename_partition(final String db_name, final String tbl_name,
           throw newMetaException(e);
         }
       } finally {
-        endFunction("alter_partition", oldPart != null, ex, tbl_name);
+        endFunction("rename_partition", oldPart != null, ex, tbl_name);
       }
       return;
     }
@@ -3534,13 +3547,13 @@ public void alter_partitions_with_environment_context(final String db_name, fina
         final List<Partition> new_parts, EnvironmentContext environmentContext)
         throws InvalidOperationException, MetaException, TException {
 
-      startTableFunction("alter_partitions", db_name, tbl_name);
-
-      if (LOG.isInfoEnabled()) {
-        for (Partition tmpPart : new_parts) {
+      for (Partition tmpPart : new_parts) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("New partition values:" + tmpPart.getValues());
         }
+        startPartitionFunction("alter_partitions", db_name, tbl_name, tmpPart.getValues());
       }
+
       // all partitions are altered atomically
       // all prehooks are fired together followed by all post hooks
       List<Partition> oldParts = null;
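
For reviewers: the change swaps the table-level audit helpers for partition-level ones and pairs each startPartitionFunction call with an endFunction call in a finally-style position, so a completion record is written even when the operation throws. Below is a minimal, self-contained sketch of that start/end pairing. It is not HMSHandler code: the class name, log format, and method bodies are illustrative assumptions; only the two helper names and the pairing discipline mirror the diff above.

import java.util.Arrays;
import java.util.List;

// Sketch only: not org.apache.hadoop.hive.metastore.HiveMetaStore code.
public class AuditPairingSketch {

  // Assumption: mirrors the shape of startPartitionFunction as used in the
  // diff, emitting one audit line per partition-level operation on entry.
  private void startPartitionFunction(String function, String db, String tbl,
      List<String> partVals) {
    System.out.println("cmd=" + function + " db=" + db + " tbl=" + tbl
        + " partition=" + partVals);
  }

  // Assumption: mirrors the shape of endFunction as used in the diff,
  // writing the matching completion record with the success flag and error.
  private void endFunction(String function, boolean success, Exception ex,
      String tblName) {
    System.out.println("cmd=" + function + " tbl=" + tblName
        + " success=" + success + (ex != null ? " error=" + ex : ""));
  }

  // Hypothetical caller showing the pairing the patch enforces: the end
  // record is emitted from finally, so it appears whether the metastore
  // work succeeds or throws.
  public void addPartition(String db, String tbl, List<String> values) {
    boolean success = false;
    Exception ex = null;
    startPartitionFunction("add_partition", db, tbl, values);
    try {
      // ... real metastore work would happen here ...
      success = true;
    } catch (RuntimeException e) {
      ex = e;
      throw e;
    } finally {
      endFunction("add_partition", success, ex, tbl);
    }
  }

  public static void main(String[] args) {
    new AuditPairingSketch().addPartition(
        "default", "sales", Arrays.asList("2016", "08"));
  }
}

Logging the partition values (rather than only db/table, as the old startTableFunction calls did) is what makes the audit trail grep-able per partition, which is the point of the patch.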