diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HybridRawStoreProxy.java metastore/src/java/org/apache/hadoop/hive/metastore/HybridRawStoreProxy.java new file mode 100644 index 0000000..5356b28 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/HybridRawStoreProxy.java @@ -0,0 +1,179 @@ +package org.apache.hadoop.hive.metastore; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang.ClassUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.hive.metastore.annotation.ReadOperation; +import org.apache.hadoop.hive.metastore.annotation.WriteOperation; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; + +public class HybridRawStoreProxy implements InvocationHandler { + private static final Log LOG = LogFactory.getLog(HybridRawStoreProxy.class); + + private final Class primaryClass; + private final String primaryClassName; + private final String secondaryClassName; + private final boolean secondaryExecuteRead; + private final boolean secondaryExecuteWrite; + private final boolean secondaryCompareReadResults; + private final boolean secondaryCompareWriteResults; + private final RawStore primary; + private final RawStore secondary; + + public static RawStore getProxy(Configuration conf) throws MetaException { + HybridRawStoreProxy handler = new HybridRawStoreProxy(conf); + + return (RawStore) Proxy.newProxyInstance(HybridRawStoreProxy.class.getClassLoader(), handler.primaryClass.getInterfaces(), handler); + } + + protected HybridRawStoreProxy(Configuration conf) throws MetaException { + this.primaryClassName = conf.get("hcatserver.rawstore.hybrid.primary.impl", null); + this.secondaryClassName = conf.get("hcatserver.rawstore.hybrid.secondary.impl", null); + + if (this.primaryClassName == null || this.secondaryClassName == null) { + throw new MetaException("Invalid implementation class: primary=" + this.primaryClassName + " secondary=" + this.secondaryClassName); + } + + this.secondaryExecuteRead = conf.getBoolean("hcatserver.rawstore.hybrid.execute.read", true); + this.secondaryExecuteWrite = conf.getBoolean("hcatserver.rawstore.hybrid.execute.write", true); + this.secondaryCompareReadResults = conf.getBoolean("hcatserver.rawstore.hybrid.compare.read.results", true); + this.secondaryCompareWriteResults = conf.getBoolean("hcatserver.rawstore.hybrid.compare.write.results", true); + + this.primaryClass = + (Class) MetaStoreUtils.getClass(primaryClassName); + Class secondaryClass = + (Class) MetaStoreUtils.getClass(secondaryClassName); + + this.primary = ReflectionUtils.newInstance(primaryClass, conf); + this.secondary = ReflectionUtils.newInstance(secondaryClass, conf); + + LOG.info("Initialized HybridRawStoreProxy: primary=" + this.primary + " secondary=" + this.secondary + + " executeRead=" + this.secondaryExecuteRead + " executeWrite=" + this.secondaryExecuteWrite + + " compareReadResults=" + this.secondaryCompareReadResults + " compareWriteResults=" + this.secondaryCompareWriteResults); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + 
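// For methods annotated as read or write operations, the secondary store (when enabled) is invoked first and its result or exception is recorded; the primary store is then invoked and only its result is returned to the caller. + // Secondary exceptions are logged but never propagated, and primary/secondary results are compared when comparison is enabled; unannotated methods are dispatched to the primary only. +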
Throwable secondaryException = null; + Object primaryResult = null; + Object secondaryResult = null; + + if (method.isAnnotationPresent(ReadOperation.class)) { + LOG.info("Read operation: method=" + method); + + if (this.secondaryExecuteRead) { + try { + Object[] secondaryArgs = args; + // TODO: Hack to ensure that the result argument of the secondary doesn't pollute the primary. + // TODO: Would be safer to always clone arguments. + if (method.getName().equals("getPartitionsByExpr")) { + secondaryArgs = new Object[args.length]; + for (int i = 0; i < args.length - 1; i++) { + secondaryArgs[i] = args[i]; + } + secondaryArgs[args.length - 1] = new ArrayList((List) args[args.length - 1]); + } + + secondaryResult = method.invoke(this.secondary, secondaryArgs); + } catch (InvocationTargetException e) { + secondaryException = e.getCause(); + } catch (Exception e) { + secondaryException = e; + } + } + + if (secondaryException != null) { + LOG.warn("Secondary store threw an exception.", secondaryException); + } + + try { + primaryResult = method.invoke(this.primary, args); + + LOG.debug("Results: primary=" + primaryResult + " secondary=" + secondaryResult); + + if (this.secondaryExecuteRead && this.secondaryCompareReadResults) { + compareResults(method, primaryResult, secondaryResult); + } + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } else if (method.isAnnotationPresent(WriteOperation.class)) { + LOG.info("Write operation: method=" + method); + + if (this.secondaryExecuteWrite) { + try { + secondaryResult = method.invoke(this.secondary, args); + } catch (InvocationTargetException e) { + secondaryException = e.getCause(); + } catch (Exception e) { + secondaryException = e; + } + } + + if (secondaryException != null) { + LOG.warn("Secondary store threw an exception.", secondaryException); + } + + try { + primaryResult = method.invoke(this.primary, args); + + if (this.secondaryExecuteWrite && this.secondaryCompareWriteResults) { + compareResults(method, primaryResult, secondaryResult); + } + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } else { + LOG.info("No annotation found: method=" + method); + try { + primaryResult = method.invoke(this.primary, args); + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } + + return primaryResult; + } + + // NOTE: Copied as-is from RawStoreProxy. + private static Class[] getAllInterfaces(Class baseClass) { + List interfaces = ClassUtils.getAllInterfaces(baseClass); + Class[] result = new Class[interfaces.size()]; + int i = 0; + for (Object o : interfaces) { + result[i++] = (Class)o; + } + return result; + } + + private void compareResults(Method method, Object primaryResult, Object secondaryResult) { + String reason = null; + + if (primaryResult == null && secondaryResult == null) { + return; + } + + if (primaryResult == null && secondaryResult != null) { + reason = this.primaryClassName + " returned null result:"; + } else if (primaryResult != null && secondaryResult == null) { + reason = this.secondaryClassName + " returned null result:"; + } else if (! 
primaryResult.equals(secondaryResult)) { + reason = "Results differ:"; + } + + if (reason != null) { + LOG.warn(reason + " method=" + method.getName() + " primaryResult=" + primaryResult + " secondaryResult=" + secondaryResult); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index e5ef157..82211d5 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -27,6 +27,8 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.annotation.ReadOperation; +import org.apache.hadoop.hive.metastore.annotation.WriteOperation; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; @@ -69,6 +71,7 @@ public @interface CanNotRetry { } + @WriteOperation public abstract void shutdown(); /** @@ -78,6 +81,7 @@ * @return an active transaction */ + @WriteOperation public abstract boolean openTransaction(); /** @@ -87,68 +91,91 @@ * @return true or false */ @CanNotRetry + @WriteOperation public abstract boolean commitTransaction(); /** * Rolls back the current transaction if it is active */ @CanNotRetry + @WriteOperation public abstract void rollbackTransaction(); + @WriteOperation public abstract void createDatabase(Database db) throws InvalidObjectException, MetaException; + @ReadOperation public abstract Database getDatabase(String name) throws NoSuchObjectException; + @WriteOperation public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + @WriteOperation public abstract boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + @ReadOperation public abstract List getDatabases(String pattern) throws MetaException; + @ReadOperation public abstract List getAllDatabases() throws MetaException; + @WriteOperation public abstract boolean createType(Type type); + @ReadOperation public abstract Type getType(String typeName); + @WriteOperation public abstract boolean dropType(String typeName); + @WriteOperation public abstract void createTable(Table tbl) throws InvalidObjectException, MetaException; + @WriteOperation public abstract boolean dropTable(String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + @ReadOperation public abstract Table getTable(String dbName, String tableName) throws MetaException; + @WriteOperation public abstract boolean addPartition(Partition part) throws InvalidObjectException, MetaException; + @WriteOperation public abstract boolean addPartitions(String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; + @WriteOperation public abstract boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; + @ReadOperation public abstract Partition getPartition(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; + @ReadOperation public abstract boolean doesPartitionExist(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; + @WriteOperation public abstract boolean dropPartition(String dbName, String tableName, List part_vals) throws MetaException, 
NoSuchObjectException, InvalidObjectException, InvalidInputException; + @ReadOperation public abstract List getPartitions(String dbName, String tableName, int max) throws MetaException, NoSuchObjectException; + @WriteOperation public abstract void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; + @ReadOperation public List getTables(String dbName, String pattern) throws MetaException; @@ -162,9 +189,11 @@ public abstract void alterTable(String dbname, String name, Table newTable) * If there are duplicate names, only one instance of the table will be returned * @throws MetaException */ + @ReadOperation public List getTableObjectsByName(String dbname, List tableNames) throws MetaException, UnknownDBException; + @ReadOperation public List getAllTables(String dbName) throws MetaException; /** @@ -179,123 +208,162 @@ public abstract void alterTable(String dbname, String name, Table newTable) * @throws MetaException * @throws UnknownDBException */ + @ReadOperation public abstract List listTableNamesByFilter(String dbName, String filter, short max_tables) throws MetaException, UnknownDBException; + @ReadOperation public abstract List
listTablesByQuery(String[] keywords, long maxResults) throws MetaException; + @ReadOperation public abstract List listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException; + @ReadOperation public abstract PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException; + @ReadOperation public abstract List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException; + @WriteOperation public abstract void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; + @WriteOperation public abstract void alterPartitions(String db_name, String tbl_name, List> part_vals_list, List new_parts) throws InvalidObjectException, MetaException; + @WriteOperation public abstract boolean addIndex(Index index) throws InvalidObjectException, MetaException; + @ReadOperation public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; + @WriteOperation public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; + @ReadOperation public abstract List getIndexes(String dbName, String origTableName, int max) throws MetaException; + @ReadOperation public abstract List listIndexNames(String dbName, String origTableName, short max) throws MetaException; + @WriteOperation public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws InvalidObjectException, MetaException; + @ReadOperation public abstract List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; + @ReadOperation public abstract boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException; + @ReadOperation public abstract List getPartitionsByNames( String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; + @WriteOperation public abstract Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + @ReadOperation public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + @WriteOperation public abstract boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; + @WriteOperation public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + @WriteOperation public abstract boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException; + @WriteOperation public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) throws MetaException, NoSuchObjectException; + @ReadOperation public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) throws InvalidObjectException, MetaException; + @ReadOperation public abstract 
PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException; + @ReadOperation public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException; + @ReadOperation public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; + @ReadOperation public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; + @ReadOperation public abstract List listPrincipalGlobalGrants(String principalName, PrincipalType principalType); + @ReadOperation public abstract List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName); + @ReadOperation public abstract List listAllTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName); + @ReadOperation public abstract List listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName); + @ReadOperation public abstract List listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String columnName); + @ReadOperation public abstract List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName, String columnName); + @WriteOperation public abstract boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; + @WriteOperation public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) throws InvalidObjectException, MetaException, NoSuchObjectException; + @ReadOperation public abstract org.apache.hadoop.hive.metastore.api.Role getRole( String roleName) throws NoSuchObjectException; + @ReadOperation public List listRoleNames(); + @ReadOperation public List listRoles(String principalName, PrincipalType principalType); + @ReadOperation public List listRolesWithGrants(String principalName, PrincipalType principalType); @@ -305,13 +373,16 @@ public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean gran * @param roleName * @return */ + @ReadOperation public List listRoleMembers(String roleName); + @ReadOperation public abstract Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; + @ReadOperation public abstract List getPartitionsWithAuth(String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; @@ -331,6 +402,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws MetaException * @throws NoSuchObjectException */ + @ReadOperation public abstract List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException; @@ -356,6 +428,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws NoSuchObjectException * @throws InvalidObjectException */ + 
@ReadOperation public abstract List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; @@ -368,6 +441,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws InvalidObjectException * @throws InvalidInputException */ + @WriteOperation public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -381,6 +455,7 @@ public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) * @throws InvalidObjectException * @throws InvalidInputException */ + @WriteOperation public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -398,6 +473,7 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb * @throws InvalidInputException * */ + @ReadOperation public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; @@ -405,6 +481,7 @@ public abstract ColumnStatistics getTableColumnStatistics(String dbName, String * Returns the relevant column statistics for given columns in given partitions in a given * table in a given database if such statistics exist. */ + @ReadOperation public abstract List getPartitionColumnStatistics( String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; @@ -425,7 +502,7 @@ public abstract ColumnStatistics getTableColumnStatistics(String dbName, String * @throws InvalidObjectException * @throws InvalidInputException */ - + @WriteOperation public abstract boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -444,70 +521,96 @@ public abstract boolean deletePartitionColumnStatistics(String dbName, String ta * @throws InvalidObjectException * @throws InvalidInputException */ - + @WriteOperation public abstract boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; + @WriteOperation public abstract long cleanupEvents(); + @WriteOperation public abstract boolean addToken(String tokenIdentifier, String delegationToken); + @WriteOperation public abstract boolean removeToken(String tokenIdentifier); + @ReadOperation public abstract String getToken(String tokenIdentifier); + @ReadOperation public abstract List getAllTokenIdentifiers(); + @WriteOperation public abstract int addMasterKey(String key) throws MetaException; + @WriteOperation public abstract void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException; + @WriteOperation public abstract boolean removeMasterKey(Integer keySeq); + @ReadOperation public abstract String[] getMasterKeys(); + @ReadOperation public abstract void verifySchema() throws MetaException; + @ReadOperation public abstract String getMetaStoreSchemaVersion() throws MetaException; + @WriteOperation public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; + @ReadOperation public abstract boolean 
canDropAllPartitions(String dbname, String name, boolean allowsql) throws MetaException, NoSuchObjectException; + @WriteOperation public abstract List dropAllPartitionsNoTxn(String dbname, String name, boolean checkLocation, boolean allowsql, Path tablePath) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + @WriteOperation void dropPartitions(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; + @ReadOperation List listPrincipalDBGrantsAll( String principalName, PrincipalType principalType); + @ReadOperation List listPrincipalTableGrantsAll( String principalName, PrincipalType principalType); + @ReadOperation List listPrincipalPartitionGrantsAll( String principalName, PrincipalType principalType); + @ReadOperation List listPrincipalTableColumnGrantsAll( String principalName, PrincipalType principalType); + @ReadOperation List listPrincipalPartitionColumnGrantsAll( String principalName, PrincipalType principalType); + @ReadOperation List listGlobalGrantsAll(); + @ReadOperation List listDBGrantsAll(String dbName); + @ReadOperation List listPartitionColumnGrantsAll( String dbName, String tableName, String partitionName, String columnName); + @ReadOperation List listTableGrantsAll(String dbName, String tableName); + @ReadOperation List listPartitionGrantsAll( String dbName, String tableName, String partitionName); + @ReadOperation List listTableColumnGrantsAll( String dbName, String tableName, String columnName); @@ -517,6 +620,7 @@ void dropPartitions(String dbName, String tblName, List partNames) * @throws InvalidObjectException * @throws MetaException */ + @WriteOperation public void createFunction(Function func) throws InvalidObjectException, MetaException; @@ -528,6 +632,7 @@ public void createFunction(Function func) * @throws InvalidObjectException * @throws MetaException */ + @WriteOperation public void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; @@ -541,6 +646,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) * @throws InvalidObjectException * @throws InvalidInputException */ + @WriteOperation public void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; @@ -551,6 +657,7 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ + @ReadOperation public Function getFunction(String dbName, String funcName) throws MetaException; /** @@ -560,8 +667,10 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ + @ReadOperation public List getFunctions(String dbName, String pattern) throws MetaException; + @ReadOperation public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; @@ -570,6 +679,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId */ + @ReadOperation public NotificationEventResponse getNextNotification(NotificationEventRequest rqst); @@ -577,12 +687,14 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * Add a notification entry. 
This should only be called from inside the metastore * @param event the notification to add */ + @WriteOperation public void addNotificationEvent(NotificationEvent event); /** * Remove older notification events. * @param olderThan Remove any events older than a given number of seconds */ + @WriteOperation public void cleanNotificationEvents(int olderThan); /** @@ -591,6 +703,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * and determine which notification events happened before or after the export. * @return */ + @ReadOperation public CurrentNotificationEventId getCurrentNotificationEventId(); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java index 5bde45b..9e61948 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java @@ -34,9 +34,13 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + @InterfaceAudience.Private @InterfaceStability.Evolving public class RawStoreProxy implements InvocationHandler { + private static final Log LOG = LogFactory.getLog(RawStoreProxy.class); private final RawStore base; private final MetaStoreInit.MetaStoreInitData metaStoreInitData = @@ -54,7 +58,20 @@ protected RawStoreProxy(HiveConf hiveConf, Configuration conf, // This has to be called before initializing the instance of RawStore init(); - this.base = ReflectionUtils.newInstance(rawStoreClass, conf); + if (rawStoreClass.getName().equals("org.apache.hadoop.hive.metastore.HybridRawStoreProxy")) { + try { + Method m = rawStoreClass.getDeclaredMethod("getProxy", Configuration.class); + this.base = (RawStore) m.invoke(null, conf); + } catch (NoSuchMethodException e) { + throw new MetaException(e.getMessage()); + } catch (IllegalAccessException e) { + throw new MetaException(e.getMessage()); + } catch (InvocationTargetException e) { + throw new MetaException(e.getMessage()); + } + } else { + this.base = ReflectionUtils.newInstance(rawStoreClass, conf); + } } public static RawStore getProxy(HiveConf hiveConf, Configuration conf, String rawStoreClassName, @@ -63,8 +80,17 @@ public static RawStore getProxy(HiveConf hiveConf, Configuration conf, String ra Class baseClass = (Class) MetaStoreUtils.getClass( rawStoreClassName); + LOG.info("getProxy: rawStoreClassName=" + rawStoreClassName + " baseClass=" + baseClass + + " interfaces=" + java.util.Arrays.asList(getAllInterfaces(baseClass))); + RawStoreProxy handler = new RawStoreProxy(hiveConf, conf, baseClass, id); + if (rawStoreClassName.equals("org.apache.hadoop.hive.metastore.HybridRawStoreProxy")) { + String baseClassName = conf.get("hcatserver.rawstore.hybrid.primary.impl", "org.apache.hadoop.hive.metastore.ObjectStore"); + + baseClass = (Class) MetaStoreUtils.getClass(baseClassName); + } + // Look for interfaces on both the class and all base classes. 
return (RawStore) Proxy.newProxyInstance(RawStoreProxy.class.getClassLoader(), getAllInterfaces(baseClass), handler); @@ -111,8 +137,20 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl isTimerStarted = true; } + long start = System.currentTimeMillis(); + ret = method.invoke(base, args); + long end = System.currentTimeMillis(); + + StringBuilder sb = new StringBuilder(); + sb.append("PERFORMANCE: "); + sb.append(method.getName()); + sb.append(": "); + sb.append(end - start); + + LOG.info(sb.toString()); + if (isTimerStarted) { Deadline.stopTimer(); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index 0075ead..15cefcd 100755 --- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -559,7 +559,21 @@ public static String makePartName(List partCols, for (FieldSchema col: partCols) { colNames.add(col.getName()); } - return FileUtils.makePartName(colNames, vals, defaultStr); + return makePartNameFromColumnNames(colNames, vals, defaultStr); + } + + public static String makePartNameFromColumnNames(List names, List values, String defValue) throws MetaException { + if (names.size() == 0 || (names.size() != values.size())) { + StringBuilder sb = new StringBuilder(); + sb.append("Invalid partition key & values; keys="); + sb.append(names.toString()); + sb.append(" values="); + sb.append(values.toString()); + + throw new MetaException(sb.toString()); + } + + return FileUtils.makePartName(names, values, defValue); } public static List getPartValuesFromPartName(String partName) diff --git metastore/src/java/org/apache/hadoop/hive/metastore/annotation/ReadOperation.java metastore/src/java/org/apache/hadoop/hive/metastore/annotation/ReadOperation.java new file mode 100644 index 0000000..a0b6911 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/annotation/ReadOperation.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface ReadOperation { +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/annotation/WriteOperation.java metastore/src/java/org/apache/hadoop/hive/metastore/annotation/WriteOperation.java new file mode 100644 index 0000000..8532d40 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/annotation/WriteOperation.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface WriteOperation { +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleDatabase.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleDatabase.java new file mode 100644 index 0000000..ff6df43 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleDatabase.java @@ -0,0 +1,13 @@ +package org.apache.hadoop.hive.metastore.oracle; + +public class OracleDatabase { + public long id; + public String name; + public String location; + + public OracleDatabase(long id, String name, String location) { + this.id = id; + this.name = name; + this.location = location; + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OraclePartition.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OraclePartition.java new file mode 100644 index 0000000..1976ce4 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OraclePartition.java @@ -0,0 +1,52 @@ +package org.apache.hadoop.hive.metastore.oracle; + +public class OraclePartition { + public long id; + public long tblId; + public long sdId; + public long cdId; + public long paramsId; + public long sdParamsId; + public long serdeParamsId; + public String name; + public String location; + public int creationTime; + public String lastModifiedTime; + public int lastAccessTime; + + public OraclePartition(long id, long tblId, long sdId, long cdId, String name, + String location, int creationTime, String lastModifiedTime, int lastAccessTime) { + this.id = id; + this.tblId = tblId; + this.sdId = sdId; + this.cdId = cdId; + this.paramsId = 0L; + this.sdParamsId = 0L; + this.serdeParamsId = 0L; + this.name = name; + this.location = location; + this.creationTime = creationTime; + 
this.lastModifiedTime = lastModifiedTime; + this.lastAccessTime = lastAccessTime; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("OraclePartition: {"); + sb.append(" id=" + this.id); + sb.append(" tbl=" + this.tblId); + sb.append(" sd=" + this.sdId); + sb.append(" cd=" + this.cdId); + sb.append(" params=" + this.paramsId); + sb.append(" sd_params=" + this.sdParamsId); + sb.append(" serde_params=" + this.serdeParamsId); + sb.append(" name=" + this.name); + sb.append(" location=" + this.location); + sb.append(" creation_time=" + this.creationTime); + sb.append(" last_modified_time=" + this.lastModifiedTime); + sb.append(" last_access_time=" + this.lastAccessTime); + sb.append(" }"); + + return sb.toString(); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStore.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStore.java new file mode 100644 index 0000000..f52f8f1 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStore.java @@ -0,0 +1,5514 @@ +package org.apache.hadoop.hive.metastore.oracle; + +import java.sql.Clob; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLIntegrityConstraintViolationException; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.sql.DataSource; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.dbcp.DriverManagerConnectionFactory; +import org.apache.commons.dbcp.PoolableConnectionFactory; +import org.apache.commons.dbcp.PoolingDataSource; +import org.apache.commons.pool.impl.GenericObjectPool; +import org.apache.commons.pool.impl.StackKeyedObjectPoolFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; +import org.apache.hadoop.hive.metastore.ProtectMode; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.TableSearchResult; +import org.apache.hadoop.hive.metastore.TableSearchSQL; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import 
org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.PartitionValuesRow; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.thrift.TException; + +/* METASTORE_RAW_STORE_IMPL "hive.metastore.rawstore.impl" "org.apache.hadoop.hive.metastore.ObjectStore" */ + +public class OracleStore implements RawStore { + /* Copy of DataNucleus configuration names from RDBMSPropertyNames. 
*/ + public static final String CONNECTION_POOL_MAX_ACTIVE = "datanucleus.connectionPool.maxActive"; + public static final String CONNECTION_POOL_MAX_IDLE = "datanucleus.connectionPool.maxIdle"; + public static final String CONNECTION_POOL_MAX_WAIT = "datanucleus.connectionPool.maxWait"; + public static final String CONNECTION_POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS = "datanucleus.connectionPool.minEvictableIdleTimeMillis"; + public static final String CONNECTION_POOL_MIN_IDLE = "datanucleus.connectionPool.minIdle"; + public static final String CONNECTION_POOL_TEST_SQL = "datanucleus.connectionPool.testSQL"; + public static final String CONNECTION_POOL_TIME_BETWEEN_EVICTOR_RUNS_MILLIS = "datanucleus.connectionPool.timeBetweenEvictionRunsMillis"; + + public static final String MAXPARTLIMIT_MSGPREFIX = "Partition predicate "; + + private static final Log LOG = LogFactory.getLog(OracleStore.class); + private static final Map EMPTY_PARAMETERS = Collections.emptyMap(); + private static final ReentrantLock lock = new ReentrantLock(); + private static GenericObjectPool pool = null; + private static DataSource source = null; + + private Configuration conf; + private Connection connection; + private PartitionExpressionProxy expressionProxy = null; + private Pattern partitionValidationPattern; + private int maxPartitionsPermitted = -1; + + private static enum TransactionState { CLOSED, OPEN, ROLLBACK }; + + private int openConnectionCount = 0; + private int openTransactionCount = 0; + private TransactionState transactionState = TransactionState.CLOSED; + + // TODO: Make this static and initialize once. + private List discoveryQueries; + + private Map storageDescriptors = null; + + public OracleStore() { + } + + @Override + public Configuration getConf() { + return this.conf; + } + + @Override + public void setConf(Configuration configuration) { + // Initialize the DataSource. + this.conf = configuration; + this.expressionProxy = null; + this.maxPartitionsPermitted = configuration.getInt(HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION.varname, -1); + + this.lock.lock(); + + try { + LOG.info("Initializing OracleObjectStore"); + + // Reinitialize the connection pool if the configs have changed. + if (this.source == null) { + this.source = initializeConnectionPool(); + } + + if (this.source == null) { + LOG.error("Unable to initialize the DataSource"); + } + + /* + if (this.source != null) { + try { + this.connection = this.source.getConnection(); + LOG.info("Startup: connection=" + this.connection + " thread_id=" + Thread.currentThread().getId() + " thread_name=" + Thread.currentThread().getName()); + LOG.info("Connection: autocommit=" + this.connection.getAutoCommit() + " holdability=" + this.connection.getHoldability() + " transaction=" + this.connection.getTransactionIsolation()); + } catch (SQLException e) { + LOG.error("Unable to create a Connection", e); + } + } + else { + LOG.error("Unable to initialize the DataSource"); + } + */ + + this.expressionProxy = initializeExpressionProxy(configuration); + String partitionValidationRegex = + configuration.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name()); + // NOTE: Original condition was partitionValidationRegex.equals("") + if (partitionValidationRegex != null && ! 
partitionValidationRegex.equals("")) { + partitionValidationPattern = Pattern.compile(partitionValidationRegex); + } else { + partitionValidationPattern = null; + } + + this.discoveryQueries = initializeDiscoveryQueries(); + + try { + this.storageDescriptors = getStorageDescriptorMap(); + } catch (SQLException e) { + LOG.error("Unable to pre-fetch StorageDescriptors", e); + } + } finally { + this.lock.unlock(); + } + } + + private DataSource initializeConnectionPool() { + String driver = this.conf.get(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER.varname, null); + if (driver == null) { + // Empty driver name. + } + + LOG.info("Attempting to load driver: " + driver); + + try { + Class.forName(driver); + } catch (ClassNotFoundException e) { + // Driver not found in classpath. + } + + //GenericObjectPool pool = new GenericObjectPool(null); + this.pool = new GenericObjectPool(null); + + // Get the driver configuration properties. + String uri = this.conf.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname); + Properties props = new Properties(); + props.setProperty("user", this.conf.get(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME.varname, "")); + props.setProperty("password", this.conf.get(HiveConf.ConfVars.METASTOREPWD.varname, "")); + + // Set up the connection pool. + // javax.jdo.option.ConnectionURL METASTORECONNECTURLKEY + // javax.jdo.option.ConnectionDriverName METASTORE_CONNECTION_DRIVER + // javax.jdo.option.ConnectionUserName METASTORE_CONNECTION_USER_NAME + // javax.jdo.option.ConnectionPassword METASTOREPWD + // javax.jdo.option.Multithreaded METASTOREMULTITHREADED + DriverManagerConnectionFactory factory = new DriverManagerConnectionFactory(uri, props); + + StackKeyedObjectPoolFactory keyedFactory = null; + + //PoolableConnectionFactory poolFactory = new PoolableConnectionFactory(factory, null); + PoolableConnectionFactory poolFactory = new PoolableConnectionFactory(factory, pool, keyedFactory, "", false, false); + + //GenericObjectPool pool = new GenericObjectPool<>(poolFactory); + //poolFactory.setPool(pool); + + // Configure pool according to DataNucleus properties. + int value = 0; + + value = this.conf.getInt(CONNECTION_POOL_MAX_IDLE, 0); + if (value > 0) { + LOG.info("Connection pool: max_idle=" + value); + pool.setMaxIdle(value); + } + + value = this.conf.getInt(CONNECTION_POOL_MIN_IDLE, 0); + if (value > 0) { + LOG.info("Connection pool: min_idle=" + value); + pool.setMinIdle(value); + } + + value = this.conf.getInt(CONNECTION_POOL_MAX_ACTIVE, 0); + if (value > 0) { + LOG.info("Connection pool: max_active=" + value); + pool.setMaxActive(value); + } + + value = this.conf.getInt(CONNECTION_POOL_MAX_WAIT, 0); + if (value > 0) { + LOG.info("Connection pool: max_wait=" + value); + pool.setMaxWait(value); + } + + value = this.conf.getInt(CONNECTION_POOL_TIME_BETWEEN_EVICTOR_RUNS_MILLIS, 0); + if (value > 0) { + // Evict at least 1/4 of the maxIdle connections. + int maxIdle = pool.getMaxIdle(); + int numTestsPerEvictionRun = (int) Math.ceil(((double) maxIdle / 4)); + + LOG.info("Connection pool: time_between_eviction_runs_millis=" + value); + pool.setTimeBetweenEvictionRunsMillis(value); + LOG.info("Connection pool: num_tests_per_eviction_run=" + numTestsPerEvictionRun); + pool.setNumTestsPerEvictionRun(numTestsPerEvictionRun); + } + else if (value < 0) { + // Disable the evictor thread. 
+ LOG.info("Connection pool: time_between_eviction_runs_millis=-1"); + pool.setTimeBetweenEvictionRunsMillis(-1); + } + + value = this.conf.getInt(CONNECTION_POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS, 0); + if (value > 0) { + LOG.info("Connection pool: min_evictable_idle_time_millis=" + value); + pool.setMinEvictableIdleTimeMillis(value); + } + + String testSql = this.conf.get(CONNECTION_POOL_TEST_SQL, null); + if (testSql != null) + { + LOG.info("Connection pool: test_on_borrow=true validation_query=" + testSql); + poolFactory.setValidationQuery(testSql); + pool.setTestOnBorrow(true); + } else { + LOG.info("Connection pool: test_on_borrow=false"); + pool.setTestOnBorrow(false); + } + + PoolingDataSource ds = new PoolingDataSource(pool); + + LOG.info("Successfully created DataSource"); + + return ds; + } + + /** + * Returns a new PartitionExpressionProxy used to evaluate expressions. + * Required to prevent circular dependency - ql -> metastore client -> metastore server -> ql. + * If server and client are split, this can be removed. + * @param conf the Configuration. + * @return a new PartitionExpressionProxy object. + */ + private PartitionExpressionProxy initializeExpressionProxy(Configuration conf) { + String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + + try { + @SuppressWarnings("unchecked") + Class clazz = + (Class) MetaStoreUtils.getClass(className); + + return MetaStoreUtils.newInstance(clazz, new Class[0], new Object[0]); + } catch (MetaException e) { + LOG.error("Error initializing PartitionExpressionProxy", e); + throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); + } + } + + @Override + public void shutdown() { + // Clean up the connection. + try { + if (this.connection != null) { + LOG.info("Shutdown: connection=" + this.connection + " thread_id=" + Thread.currentThread().getId() + " thread_name=" + Thread.currentThread().getName()); + LOG.info("Closing Connection"); + this.connection.close(); + } + } catch (SQLException e) { + LOG.error("Unable to close Connection.", e); + } + } + + /** + * Opens a new transaction and increments the open transaction count. + * All calls to openTransaction, including nested ones, must have a corresponding + * call to commitTransaction, or a call to rollbackTransaction on failure. + * @return true if a transaction was opened; otherwise false. + */ + @Override + public boolean openTransaction() { + if (this.openTransactionCount == 0) { + try { + open(); + + this.connection.setAutoCommit(false); + this.openTransactionCount++; + this.transactionState = TransactionState.OPEN; + } catch (SQLException e) { + LOG.warn("Unable to open transaction.", e); + } + } else { + this.openTransactionCount++; + } + + return this.transactionState == TransactionState.OPEN; + } + + /** + * Closes the transaction and decrements the open transaction count. + * Committing a nested transaction is a noop. + * @return true if the transaction was closed successfully; otherwise false. + */ + @CanNotRetry + @Override + public boolean commitTransaction() { + if (this.transactionState == TransactionState.OPEN) { + if (this.openTransactionCount < 1) { + throw new RuntimeException("Unable to commit transaction. 
Imbalance of open/commit calls."); + } + + this.openTransactionCount--; + + if (this.openTransactionCount == 0) { + try { + this.connection.commit(); + this.transactionState = TransactionState.CLOSED; + } catch (SQLException e) { + LOG.error("Unable to commit transaction.", e); + } + + close(); + + return this.transactionState == TransactionState.CLOSED; + } + } + + return this.transactionState == TransactionState.OPEN; + } + + /** + * Rolls back the transaction and resets the open transaction count. + */ + @CanNotRetry + @Override + public void rollbackTransaction() { + if (this.transactionState == TransactionState.OPEN && this.openTransactionCount > 0) { + try { + if (this.connection != null) { + this.connection.rollback(); + } else { + LOG.warn("Rollback transaction was called on null Connection."); + } + } catch (SQLException e) { + LOG.error("Unable to rollback transaction.", e); + } + + close(); + + this.openTransactionCount = 0; + this.transactionState = TransactionState.ROLLBACK; + } + } + + /* Database support */ + public void createDatabase(Database db) throws InvalidObjectException, MetaException { + // Required by hcat_server init. + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + long id = getNextDatabaseId(); + + String name = HiveStringUtils.normalizeIdentifier(db.getName()); + + ps = this.connection.prepareStatement("INSERT INTO V2_DBS (DB_ID, NAME, OWNER_NAME, OWNER_TYPE, LOCATION, DESCRIPTION) VALUES (?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, name); + ps.setString(3, db.getOwnerName()); + ps.setString(4, db.getOwnerType() == null ? PrincipalType.USER.name() : db.getOwnerType().name()); + ps.setString(5, db.getLocationUri()); + ps.setString(6, db.getDescription() == null ? "" : db.getDescription()); + + LOG.info(ps.toString()); + + int rc = ps.executeUpdate(); + + LOG.info("Added rows: " + rc); + + // Add database parameters. + if (db.getParameters() != null && db.getParameters().size() > 0) { + setDatabaseParams(id, db.getParameters()); + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + } + + public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = verifyDatabase(getOracleDatabase(dbname), dbname); + + ps = this.connection.prepareStatement("DELETE FROM V2_DBS WHERE DB_ID = ?"); + ps.setLong(1, mdb.id); + + int rc = ps.executeUpdate(); + + LOG.info("Deleted rows: " + rc); + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public boolean alterDatabase(String dbname, Database db) throws MetaException, NoSuchObjectException { + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = verifyDatabase(getOracleDatabase(dbname), dbname); + + Database current = getDatabase(dbname); + + LOG.info(db); + + // Check for ALTER DATABASE SET BDPROPERTIES + if (! 
current.getParameters().equals(db.getParameters())) { + LOG.info("Params are different: " + db.getParameters()); + + // Delete all of the existing parameters. + deleteDatabaseParams(mdb.id); + + // Set the new parameters if they exist. + if (db.getParameters() != null && db.getParameters().size() > 0) { + setDatabaseParams(mdb.id, db.getParameters()); + } + } + + // Check for ALTER DATABASE SET OWNER + if (! db.getOwnerName().equals(current.getOwnerName())) { + LOG.info("Updating owner: " + db.getOwnerName() + " " + db.getOwnerType()); + + ps = this.connection.prepareStatement("UPDATE V2_DBS SET OWNER_NAME = ?, OWNER_TYPE = ? WHERE DB_ID = ?"); + ps.setString(1, db.getOwnerName()); + ps.setString(2, db.getOwnerType().name()); + ps.setLong(3, mdb.id); + + try { + ps.executeUpdate(); + } finally { + close(ps); + } + } + + // Check for ALTER DATABASE SET LOCATION + if (! db.getLocationUri().equals(current.getLocationUri())) { + LOG.info("Updating location: " + db.getLocationUri()); + + ps = this.connection.prepareStatement("UPDATE V2_DBS SET LOCATION = ? WHERE DB_ID = ?"); + ps.setString(1, db.getLocationUri()); + ps.setLong(2, mdb.id); + + try { + ps.executeUpdate(); + } finally { + close(ps); + } + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public Database getDatabase(String dbname) throws NoSuchObjectException { + // Required by hcat_server init. + Database db = null; + boolean exists = false; + + long id = 0; + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT DB_ID, NAME, OWNER_NAME, OWNER_TYPE, LOCATION, DESCRIPTION FROM V2_DBS WHERE NAME = ?"); + ps.setString(1, HiveStringUtils.normalizeIdentifier(dbname)); + + rs = ps.executeQuery(); + + if (rs.next()) { + id = rs.getLong(1); + + db = new Database(); + db.setName(rs.getString(2)); + db.setOwnerName(rs.getString(3)); + db.setOwnerType(getPrincipalTypeFromString(rs.getString(4))); + db.setLocationUri(rs.getString(5)); + db.setDescription(rs.getString(6)); + + exists = true; + } + + LOG.info("db: " + db); + + if (db != null) { + // Fetch database parameters. + Map params = null; + + try { + params = getDatabaseParams(id); + } catch (SQLException e) { + LOG.error("Failure.", e); + params = new HashMap(0); + } + + db.setParameters(params); + } else { + // TODO: Throw same error message as verifyDatabase. + throw new NoSuchObjectException(dbname); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new NoSuchObjectException(dbname + ": " + e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return db; + } + + public List getDatabases(String pattern) throws MetaException { + ArrayList names = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT NAME FROM V2_DBS ORDER BY NAME"); + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + + // Apply the filter pattern. + // TODO: Pushdown filter to database. + filterByPattern(pattern, names); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + public List getAllDatabases() throws MetaException { + // Required by cli init. 
+ ArrayList names = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT NAME FROM V2_DBS ORDER BY NAME"); + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + + rs.close(); + ps.close(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + /* Table support */ + public void createTable(Table tbl) throws InvalidObjectException, MetaException { + boolean committed = false; + + PreparedStatement ps = null; + Clob viewExpanded = null; + Clob viewOriginal = null; + + try { + open(); + + openTransaction(); + + String dbname = HiveStringUtils.normalizeIdentifier(tbl.getDbName()); + OracleDatabase mdb = getOracleDatabase(dbname); + + if (mdb == null) { + throw new InvalidObjectException("Database " + dbname + " doesn't exist."); + } + + LOG.info("Create table: tbl=" + tbl); + + long id = getNextTableId(); + + // Add table columns. + long cdId = 0L; + if (tbl.getSd().getCols() != null && tbl.getSd().getCols().size() > 0) { + cdId = getNextColumnDescriptorId(); + } + + // Add table parameters if the exist. DDL_TIME is stored with the table metadata. + long paramsId = 0L; + if (tbl.getParameters() != null && tbl.getParameters().size() > 0) { + if (tbl.getParameters().size() > 1 || ! tbl.getParameters().containsKey(hive_metastoreConstants.DDL_TIME)) { + paramsId = getNextTableParamsId(); + } + } + + StorageDescriptor sd = new StorageDescriptor(tbl.getSd()); + sd.unsetCols(); + sd.unsetLocation(); + sd.unsetParameters(); + sd.getSerdeInfo().unsetParameters(); + sd.unsetSkewedInfo(); + + // Add storage descriptor (foreign key). + long sdId = this.storageDescriptors.containsKey(sd) ? this.storageDescriptors.get(sd) : createStorageDescriptor(sd); + + // Add storage descriptor params. + long sdParamsId = 0L; + if (tbl.getSd().getParameters() != null && tbl.getSd().getParameters().size() > 0) { + sdParamsId = getNextStorageDescriptorParamsId(); + } + + // Add serde params. 
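+ // As with SD_PARAM_ID above, a SERDE_PARAM_ID is allocated only when the serde actually carries parameters; otherwise the column is left NULL in V2_TBLS.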
+ long serdeParamsId = 0L; + if (tbl.getSd().getSerdeInfo().getParameters() != null && tbl.getSd().getSerdeInfo().getParameters().size() > 0) { + serdeParamsId = getNextSerdeParamsId(); + } + + String name = HiveStringUtils.normalizeIdentifier(tbl.getTableName()); + String type = tbl.getTableType(); + + ps = this.connection.prepareStatement("INSERT INTO V2_TBLS (TBL_ID, DB_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, TYPE, OWNER_NAME, LOCATION, RETENTION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME, VIEW_EXPANDED_TEXT, VIEW_ORIGINAL_TEXT) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setLong(2, mdb.id); + ps.setLong(3, sdId); + if (cdId > 0L) { + ps.setLong(4, cdId); + } else { + ps.setNull(4, Types.BIGINT); + } + if (paramsId > 0L) { + ps.setLong(5, paramsId); + } else { + ps.setNull(5, Types.BIGINT); + } + if (sdParamsId > 0L) { + ps.setLong(6, sdParamsId); + } else { + ps.setNull(6, Types.BIGINT); + } + if (serdeParamsId > 0L) { + ps.setLong(7, serdeParamsId); + } else { + ps.setNull(7, Types.BIGINT); + } + ps.setString(8, name); + ps.setString(9, type); + ps.setString(10, tbl.getOwner()); + ps.setString(11, tbl.getSd().getLocation()); + ps.setInt(12, tbl.getRetention()); + ps.setInt(13, tbl.getCreateTime()); + ps.setInt(14, tbl.getCreateTime()); + ps.setInt(15, tbl.getLastAccessTime()); + if (tbl.getViewExpandedText() != null && tbl.getViewExpandedText().length() > 0) { + viewExpanded = this.connection.createClob(); + viewExpanded.setString(1, tbl.getViewExpandedText()); + ps.setClob(16, viewExpanded); + } else { + ps.setNull(16, Types.CLOB); + } + if (tbl.getViewOriginalText() != null && tbl.getViewOriginalText().length() > 0) { + viewOriginal = this.connection.createClob(); + viewOriginal.setString(1, tbl.getViewOriginalText()); + ps.setClob(17, viewOriginal); + } else { + ps.setNull(17, Types.CLOB); + } + + LOG.info(ps.toString()); + + int rc = ps.executeUpdate(); + + LOG.info("Added rows: " + rc); + + // Add partition columns. + if (tbl.getPartitionKeys() != null && tbl.getPartitionKeys().size() > 0) { + createPartitionColumns(id, tbl.getPartitionKeys()); + } + + // Add table columns. Foreign key prohibit earlier creation. + if (cdId > 0L) { + createTableColumns(id, cdId, tbl.getSd().getCols()); + } + + if (sdParamsId > 0L) { + setStorageDescriptorParams(id, sdParamsId, tbl.getSd().getParameters()); + } + + if (serdeParamsId > 0L) { + setSerdeParams(id, serdeParamsId, tbl.getSd().getSerdeInfo().getParameters()); + } + + if (paramsId > 0L) { + String time = tbl.getParameters().remove(hive_metastoreConstants.DDL_TIME); + setTableParams(id, paramsId, tbl.getParameters()); + tbl.getParameters().put(hive_metastoreConstants.DDL_TIME, time); + } + + // TODO: Set privileges. + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + if (viewOriginal != null) { + try { + viewOriginal.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + if (viewExpanded != null) { + try { + viewExpanded.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + close(ps); + + if (! 
committed) { + rollbackTransaction(); + } + + close(); + } + } + + public boolean dropTable(String dbname, String tblname) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + if (tbl != null) { + // TODO: Drop grants. + // TODO: Drop statistics. + + // Rely on V2_TBLS foreign keys to clean up satellite tables: + // V2_TBLS -> V2_SD_PARAMS V2_SERDE_PARAMS V2_TBL_COLS V2_TBL_PART_PARAMS V2_PARTITION_COLS V2_PARTITIONS + ps = this.connection.prepareStatement("DELETE FROM V2_TBLS WHERE TBL_ID = ?"); + ps.setLong(1, tbl.id); + + int rc = ps.executeUpdate(); + + LOG.info("Deleted rows: " + rc); + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public void alterTable(String dbname, String tblname, Table newTable) + throws InvalidObjectException, MetaException { + boolean committed = false; + + if (newTable != null) { + PreparedStatement ps = null; + Clob viewExpanded = null; + Clob viewOriginal = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = verifyTable(getOracleTable(dbname, tblname), dbname, tblname); + Table tbl = getTable(dbname, tblname); + + int now = (int) (System.currentTimeMillis() / 1000L); + + // Partition keys can be changed via CREATE OR REPLACE VIEW + if (newTable.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) { + if (! newTable.getPartitionKeys().equals(tbl.getPartitionKeys())) { + LOG.info("View partition keys differ: orig=" + tbl.getPartitionKeys() + " new=" + newTable.getPartitionKeys()); + + deletePartitionColumns(mtbl.id); + createPartitionColumns(mtbl.id, newTable.getPartitionKeys()); + } + } + + // Check columns. + if (! tbl.getSd().getCols().equals(newTable.getSd().getCols())) { + LOG.info("Table columns differ orig=" + tbl.getSd().getCols() + " new=" + newTable.getSd().getCols()); + + mtbl.cdId = createTableColumns(mtbl.id, newTable.getSd().getCols()); + } + + // Check storage descriptor/serde. + StorageDescriptor sd = new StorageDescriptor(newTable.getSd()); + sd.unsetCols(); + sd.unsetLocation(); + sd.unsetParameters(); + sd.getSerdeInfo().unsetParameters(); + sd.unsetSkewedInfo(); // TODO: Not supporting skewed info. + if (this.storageDescriptors.containsKey(sd)) { + mtbl.sdId = this.storageDescriptors.get(sd); + } else { + LOG.info("Storage descriptors differ: orig=" + tbl.getSd() + " new=" + newTable.getSd()); + LOG.info(this.storageDescriptors + " -> " + sd); + + mtbl.sdId = createStorageDescriptor(sd); + } + + HashMap origParams = null; + HashMap newParams = null; + + // Check storage descriptor/serde parameters. + origParams = new HashMap(tbl.getSd().getParameters()); + newParams = new HashMap(); + if (newTable.getSd().getParameters() != null) { + newParams.putAll(newTable.getSd().getParameters()); + } + + if (! 
origParams.equals(newParams)) { + LOG.info("Storage descriptor parameters differ: orig=" + origParams + " new=" + newParams); + + mtbl.sdParamsId = setStorageDescriptorParams(mtbl.id, newParams); + } + + origParams = new HashMap(tbl.getSd().getSerdeInfo().getParameters()); + newParams = new HashMap(); + if (newTable.getSd().getSerdeInfo().getParameters() != null) { + newParams.putAll(newTable.getSd().getSerdeInfo().getParameters()); + } + + if (! origParams.equals(newParams)) { + LOG.info("Serde parameters differ: orig=" + origParams + " new=" + newParams); + + mtbl.serdeParamsId = setSerdeParams(mtbl.id, newParams); + } + + // Check table parameters. + origParams = new HashMap(tbl.getParameters()); + newParams = new HashMap(); + if (newTable.getParameters() != null) { + newParams.putAll(newTable.getParameters()); + } + origParams.remove(hive_metastoreConstants.DDL_TIME); + newParams.remove(hive_metastoreConstants.DDL_TIME); + + if (! origParams.equals(newParams)) { + LOG.info("Table parameters differ: orig=" + origParams + " new=" + newParams); + + mtbl.paramsId = setTableParams(mtbl.id, newParams); + } + + ps = this.connection.prepareStatement("UPDATE V2_TBLS SET SD_ID = ?, CD_ID = ?, TBL_PART_PARAM_ID = ?, SD_PARAM_ID = ?, SERDE_PARAM_ID = ?, NAME = ?, TYPE = ?, LOCATION = ?, RETENTION = ?, LAST_MODIFIED_TIME = ?, VIEW_EXPANDED_TEXT = ?, VIEW_ORIGINAL_TEXT = ? WHERE TBL_ID = ?"); + ps.setLong(1, mtbl.sdId); + ps.setLong(2, mtbl.cdId); + ps.setLong(3, mtbl.paramsId); + if (mtbl.sdParamsId > 0L) { + ps.setLong(4, mtbl.sdParamsId); + } else { + ps.setNull(4, Types.BIGINT); + } + if (mtbl.serdeParamsId > 0L) { + ps.setLong(5, mtbl.serdeParamsId); + } else { + ps.setNull(5, Types.BIGINT); + } + ps.setString(6, HiveStringUtils.normalizeIdentifier(newTable.getTableName())); + ps.setString(7, newTable.getTableType()); + ps.setString(8, newTable.getSd().getLocation()); + ps.setInt(9, newTable.getRetention()); + ps.setInt(10, now); + if (newTable.getViewExpandedText() != null && newTable.getViewExpandedText().length() > 0) { + viewExpanded = this.connection.createClob(); + viewExpanded.setString(1, newTable.getViewExpandedText()); + ps.setClob(11, viewExpanded); + } else { + ps.setNull(11, Types.CLOB); + } + if (newTable.getViewOriginalText() != null && newTable.getViewOriginalText().length() > 0) { + viewOriginal = this.connection.createClob(); + viewOriginal.setString(1, newTable.getViewOriginalText()); + ps.setClob(12, viewOriginal); + } else { + ps.setNull(12, Types.CLOB); + } + ps.setLong(13, mtbl.id); + + int rc = ps.executeUpdate(); + + LOG.info("Updated rows: " + rc); + + // TODO: Clean up unreferenced columns and parameters. + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + if (viewOriginal != null) { + try { + viewOriginal.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + if (viewExpanded != null) { + try { + viewExpanded.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + close(ps); + + if (! 
committed) { + rollbackTransaction(); + } + + close(); + } + } + } + + public Table getTable(String dbname, String tblname) throws MetaException { + Table tbl = null; + + long id = 0L; + long sdId = 0L; + long cdId = 0L; + long paramsId = 0L; + long sdParamId = 0L; + long serdeParamId = 0L; + String location = null; + String lastModifiedTime = null; + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleDatabase mdb = getOracleDatabase(dbname); + + if (mdb != null) { + ps = this.connection.prepareStatement("SELECT TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, TYPE, OWNER_NAME, LOCATION, RETENTION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME, VIEW_EXPANDED_TEXT, VIEW_ORIGINAL_TEXT FROM V2_TBLS WHERE DB_ID = ? AND NAME = ?"); + ps.setLong(1, mdb.id); + ps.setString(2, tblname); + + rs = ps.executeQuery(); + + if (rs.next()) { + Clob clob = null; + + id = rs.getLong(1); + sdId = rs.getLong(2); + cdId = rs.getLong(3); + paramsId = rs.getLong(4); + if (rs.wasNull()) { + paramsId = 0L; + } + sdParamId = rs.getLong(5); + if (rs.wasNull()) { + sdParamId = 0L; + } + serdeParamId = rs.getLong(6); + if (rs.wasNull()) { + serdeParamId = 0L; + } + location = rs.getString(10); + + tbl = new Table(); + tbl.setParameters(new HashMap()); + tbl.setDbName(dbname); + tbl.setTableName(rs.getString(7)); + tbl.setTableType(rs.getString(8)); + tbl.setOwner(rs.getString(9)); + tbl.setRetention(rs.getInt(11)); + tbl.setCreateTime(rs.getInt(12)); + tbl.getParameters().put(hive_metastoreConstants.DDL_TIME, rs.getString(13)); + tbl.setLastAccessTime(rs.getInt(14)); + + clob = rs.getClob(15); + if (! rs.wasNull()) { + tbl.setViewExpandedText(clob.getSubString(1L, (int) clob.length())); + try { + clob.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + clob = rs.getClob(16); + if (! rs.wasNull()) { + tbl.setViewOriginalText(clob.getSubString(1L, (int) clob.length())); + try { + clob.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + } + } + + if (tbl != null) { + try { + // Fetch partition columns. + tbl.setPartitionKeys(getPartitionColumns(id)); + + // Fetch table parameters. + if (paramsId > 0L) { + tbl.getParameters().putAll(getTableParams(paramsId)); + } + + // Fetch storage descriptor. + StorageDescriptor sd = getStorageDescriptor(sdId, cdId, sdParamId, serdeParamId); + sd.setLocation(location); + + tbl.setSd(sd); + + LOG.info("Retrieved table: " + tbl); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return tbl; + } + + public List getTables(String dbname, String pattern) throws MetaException { + ArrayList names = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = getOracleDatabase(dbname); + + ps = this.connection.prepareStatement("SELECT NAME FROM V2_TBLS WHERE DB_ID = ? ORDER BY NAME"); + ps.setLong(1, mdb.id); + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + + // Apply the filter pattern. 
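+ // The pattern is applied client-side to the fetched table names rather than pushed down into the SQL query.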
+ filterByPattern(pattern, names); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + public List getAllTables(String dbname) throws MetaException { + return getTables(dbname, "*"); + } + + @Override + public List listTableNamesByFilter(String dbname, String filter, short limit) + throws MetaException, UnknownDBException { + // NOTE: Appears to be accessible only through direct calls to HiveMetaStoreClient.listTableNamesByFilter + // NOTE: EQUALS and NOTEQUALS appear to be the only supported operators, making this of limited use + ArrayList tblnames = new ArrayList(); + + LOG.info("dbname=" + dbname + " filter=" + filter); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = getOracleDatabase(dbname); + + if (mdb != null) { + ArrayList params = new ArrayList(); + String where = null; + + if (filter != null && ! filter.isEmpty()) { + // TODO: Implement filters. + ExpressionTree tree = OracleStoreUtils.createExpressionTree(filter); + + if (tree != null) { + where = TableFilterGenerator.generateSqlFilter(tree, params); + + if (where == null) { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + + LOG.info("filter SQL=" + where + " params=" + params); + } else { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + } + + StringBuilder sb = new StringBuilder("SELECT V2_TBLS.NAME FROM V2_TBLS WHERE V2_TBLS.DB_ID = ?"); + + if (where != null && params.size() > 0) { + sb.append(" AND " + where); + } + + sb.append(" ORDER BY 1"); + + if (limit > 0) { + sb.insert(0, "SELECT NAME FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + LOG.info(sb.toString()); + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mdb.id); + + for (int i = 0; i < params.size(); i++) { + ps.setString(i + 2, (String) params.get(i)); + } + + rs = ps.executeQuery(); + + while (rs.next()) { + tblnames.add(rs.getString(1)); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return tblnames; + } + + public List
getTableObjectsByName(String dbname, List<String> tblnames) throws MetaException, UnknownDBException { + // TODO: Shares code with getTable. + ArrayList<Table> tables = new ArrayList<Table>
(tblnames.size()); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = getOracleDatabase(dbname); + + if (mdb != null) { + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (String name : tblnames) { + sb.append(comma + "?"); + comma = ","; + } + + ps = this.connection.prepareStatement("SELECT TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, TYPE, OWNER_NAME, LOCATION, RETENTION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME, VIEW_EXPANDED_TEXT, VIEW_ORIGINAL_TEXT FROM V2_TBLS WHERE DB_ID = ? AND NAME IN (" + sb.toString() + ")"); + ps.setLong(1, mdb.id); + + int idx = 2; + for (String name : tblnames) { + ps.setString(idx++, HiveStringUtils.normalizeIdentifier(name)); + } + + rs = ps.executeQuery(); + + ArrayList mtbls = new ArrayList(tblnames.size()); + + while (rs.next()) { + long id = 0L; + Clob clob = null; + + OracleTable mtbl = new OracleTable(rs.getLong(1), mdb.id, rs.getLong(2), rs.getLong(3), rs.getString(7), rs.getString(8), rs.getString(10)); + id = rs.getLong(4); + mtbl.paramsId = rs.wasNull() ? 0L : id; + id = rs.getLong(5); + mtbl.sdParamsId = rs.wasNull() ? 0L : id; + id = rs.getLong(6); + mtbl.serdeParamsId = rs.wasNull() ? 0L : id; + mtbls.add(mtbl); + + Table tbl = new Table(); + tbl.setParameters(new HashMap()); + tbl.setDbName(dbname); + tbl.setTableName(rs.getString(7)); + tbl.setTableType(rs.getString(8)); + tbl.setOwner(rs.getString(9)); + tbl.setRetention(rs.getInt(11)); + tbl.setCreateTime(rs.getInt(12)); + tbl.getParameters().put(hive_metastoreConstants.DDL_TIME, rs.getString(13)); + tbl.setLastAccessTime(rs.getInt(14)); + + clob = rs.getClob(15); + if (! rs.wasNull()) { + tbl.setViewExpandedText(clob.getSubString(1L, (int) clob.length())); + try { + clob.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + clob = rs.getClob(16); + if (! rs.wasNull()) { + tbl.setViewOriginalText(clob.getSubString(1L, (int) clob.length())); + try { + clob.free(); + } catch (SQLException e) { + LOG.warn("Failure to free clob.", e); + } + } + + tables.add(tbl); + } + + // TODO: Make this more efficient + for (int i = 0; i < mtbls.size(); i++) { + OracleTable mtbl = mtbls.get(i); + Table tbl = tables.get(i); + + // Fetch partition columns. + tbl.setPartitionKeys(getPartitionColumns(mtbl.id)); + + // Fetch storage descriptor. + StorageDescriptor sd = getStorageDescriptor(mtbl.sdId, mtbl.cdId, mtbl.sdParamsId, mtbl.serdeParamsId); + sd.setLocation(mtbl.location); + + tbl.setSd(sd); + + // Fetch table parameters. + tbl.getParameters().putAll(getTableParams(mtbl.paramsId)); + } + } else { + throw new UnknownDBException("Could not find database " + dbname); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return tables; + } + + /* Partition support */ + public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + ArrayList parts = new ArrayList(1); + parts.add(part); + + return addPartitions(part.getDbName(), part.getTableName(), parts); + } + + /** + * Assumes calling method has checked for existence of table. + */ + public boolean addPartitions(String dbname, String tblname, List partitions) + throws InvalidObjectException, MetaException { + // Use the same column descriptor as the table if possible. 
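+ // Storage descriptors, column lists, and parameter maps that match the table's (or an earlier partition's) are reused by ID via the value-keyed maps built below, so identical metadata rows are inserted only once.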
+ boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + OracleTable mtbl = verifyTable(getOracleTable(dbname, tblname), dbname, tblname); + Table table = getTable(dbname, tblname); + List colNames = getPartitionColumnNames(mtbl.id); + Map tblParams = mtbl.paramsId > 0L ? getTableParams(mtbl.paramsId) : new HashMap(0); + + // TODO: Implement grants + + // Use a common timestamp across all partitions instead of the timestamp in the Partition. + int now = (int) (System.currentTimeMillis() / 1000); + + ps = this.connection.prepareStatement("INSERT INTO V2_PARTITIONS (PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(2, mtbl.id); + ps.setInt(10, now); + ps.setInt(11, now); + ps.setInt(12, 0); + + String tblLocation = null; + + long sdId = 0L; + long cdId = 0L; + + HashMap sds = new HashMap(); + HashMap, Long> cds = new HashMap, Long>(); + HashMap, Long> partParams = new HashMap, Long>(); + HashMap, Long> sdParams = new HashMap, Long>(); + HashMap, Long> serdeParams = new HashMap, Long>(); + + if (mtbl.paramsId > 0L) { + // NOTE: tblParams should not contain hive_metastoreConstants.DDL_TIME. + partParams.put(tblParams, mtbl.paramsId); + } + + // TODO: Create minimal StorageDescriptor for comparison. + if (table.getSd() != null) { + // StorageDescriptor locations will always be different; remove from equality check. + tblLocation = table.getSd().getLocation(); + table.getSd().unsetLocation(); + sds.put(table.getSd(), mtbl.sdId); + + if (table.getSd().getCols() != null) { + cds.put(table.getSd().getCols(), mtbl.cdId); + } + + Map params = table.getSd().getParameters(); + if (params != null && params.size() > 0) { + sdParams.put(params, mtbl.sdParamsId); + } + + params = table.getSd().getSerdeInfo().getParameters(); + if (params != null && params.size() > 0) { + serdeParams.put(params, mtbl.serdeParamsId); + } + } + + for (Partition partition : partitions) { + if (! partition.getTableName().equals(tblname) || ! partition.getDbName().equals(dbname)) { + throw new MetaException("Partition does not belong to target table " + + dbname + "." + tblname + ": " + partition); + } + + long id = getNextPartitionId(); + + ps.setLong(1, id); + + if (partition.getSd() != null) { + // Storage descriptor locations will differ between table and partition. + String location = partition.getSd().getLocation(); + partition.getSd().setLocation(null); + + // Check for equality of table/partition storage descriptor and serde. + if (sds.containsKey(partition.getSd())) { + ps.setLong(3, sds.get(partition.getSd())); + } else { + LOG.info("Partition storage descriptor differs from table table=" + table.getSd() + " partition=" + partition.getSd()); + sdId = createStorageDescriptor(partition.getSd()); + sds.put(partition.getSd(), sdId); + ps.setLong(3, sdId); + } + + // Check for equality of table/partition columns and reuse if possible. + List partCols = partition.getSd().getCols(); + if (cds.containsKey(partCols)) { + ps.setLong(4, cds.get(partCols)); + } else { + LOG.info("Partition columns differ from table table=" + table.getSd().getCols() + " partition=" + partCols); + cdId = createTableColumns(mtbl.id, partCols); + cds.put(partCols, cdId); + ps.setLong(4, cdId); + } + + Map params = null; + + // Check for equality of table/partition storage descriptor and serde parameters. 
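+ // A parameter map equal to one already seen reuses the cached row ID; only new maps call setStorageDescriptorParams/setSerdeParams, and empty maps leave the columns NULL.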
+ params = partition.getSd().getParameters(); + if (sdParams.containsKey(params)) { + ps.setLong(6, sdParams.get(params)); + } else { + if (params.size() > 0) { + long sdParamsId = setStorageDescriptorParams(mtbl.id, params); + sdParams.put(params, sdParamsId); + ps.setLong(6, sdParamsId); + } else { + ps.setNull(6, Types.BIGINT); + } + } + + params = partition.getSd().getSerdeInfo().getParameters(); + if (serdeParams.containsKey(params)) { + ps.setLong(7, serdeParams.get(params)); + } else { + if (partition.getSd().getSerdeInfo().getParameters().size() > 0) { + long serdeParamsId = setSerdeParams(mtbl.id, params); + serdeParams.put(params, serdeParamsId); + ps.setLong(7, serdeParamsId); + } else { + ps.setNull(7, Types.BIGINT); + } + } + + ps.setString(9, location); + } else { + // Partitions added to views will have a null storage descriptor. + ps.setNull(3, Types.BIGINT); + ps.setNull(4, Types.BIGINT); + ps.setNull(6, Types.BIGINT); + ps.setNull(7, Types.BIGINT); + ps.setNull(9, Types.VARCHAR); + } + + // Check for equality of table/partition parameters. + Map params = partition.getParameters(); + String time = params.remove(hive_metastoreConstants.DDL_TIME); + if (partParams.containsKey(params)) { + ps.setLong(5, partParams.get(params)); + } else { + if (params.size() > 0) { + long paramsId = setPartitionParams(mtbl.id, params); + partParams.put(params, paramsId); + ps.setLong(5, paramsId); + } else { + ps.setNull(5, Types.BIGINT); + } + } + params.put(hive_metastoreConstants.DDL_TIME, time); + + ps.setString(8, constructPartitionName(colNames, partition.getValues())); + + ps.executeUpdate(); + + // TODO: table and table-column grants. + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public boolean addPartitions(String dbname, String tblname, PartitionSpecProxy spec, boolean ifNotExists) + throws InvalidObjectException, MetaException { + if (! spec.getTableName().equals(tblname) || ! spec.getDbName().equals(dbname)) { + throw new MetaException("Partition does not belong to target table " + + dbname + "." + tblname + ": " + spec); + } + + ArrayList partitions = new ArrayList(); + + PartitionSpecProxy.PartitionIterator iter = spec.getPartitionIterator(); + + while (iter.hasNext()) { + Partition part = iter.next(); + + if (isValidPartition(part, ifNotExists)) { + partitions.add(part); + } + } + + return addPartitions(dbname, tblname, partitions); + } + + public boolean canDropAllPartitions(String dbname, String tblname, boolean allowsql) throws MetaException, NoSuchObjectException { + // NOTE: Required by dropTable. + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + OracleTable tbl = getOracleTable(dbname, tblname); + + if (tbl != null) { + ps = this.connection.prepareStatement("SELECT A.NAME, B.VALUE FROM V2_PARTITIONS A INNER JOIN V2_TBL_PART_PARAMS B ON (A.TBL_PART_PARAM_ID = B.TBL_PART_PARAM_ID) WHERE A.TBL_ID = ? AND B.NAME = ?"); + ps.setLong(1, tbl.id); + ps.setString(2, ProtectMode.PARAMETER_NAME); + + rs = ps.executeQuery(); + + boolean protect = false; + String partname = null; + + // Fail on the first partition with a protect mode set. 
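+ // Any partition whose protect mode is noDrop, offline, or readOnly blocks the drop; the first such partition name is reported in the MetaException.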
+ while (rs.next()) { + ProtectMode mode= ProtectMode.getProtectModeFromString(rs.getString(2)); + if (mode.noDrop || mode.offline || mode.readOnly) { + protect = true; + partname = rs.getString(1); + break; + } + } + + if (protect) { + throw new MetaException("Table " + tblname + " Partition " + partname + " is protected from being dropped"); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return true; + } + + /** + * Delete metadata for all partitions for the specified table. + * Returns a list of partition locations which are not relative to the table location if + * checkLocation is true. + * @param dbname the name of the database. + * @param tblname the name of the table. + * @param checkLocation whether the list of partitions location should be created. + * @param allowsql not used. + * @param tablePath the path to the root of the table. + * @return a list of partition locations which are not relative to the table location. + */ + public List dropAllPartitionsNoTxn(String dbname, String tblname, boolean checkLocation, boolean allowsql, Path tablePath) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + // NOTE: Required by dropTable. + ArrayList paths = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + if (checkLocation) { + // mtbl.location may be null for views. + if (mtbl.location != null) { + LOG.info("Fetching partitions for table=" + tblname + " not relative to location=" + mtbl.location); + + String location = mtbl.location + Path.SEPARATOR; + + ps = this.connection.prepareStatement("SELECT LOCATION FROM V2_PARTITIONS WHERE TBL_ID = ? AND LOCATION NOT LIKE ? AND LOCATION IS NOT NULL"); + ps.setLong(1, mtbl.id); + ps.setString(2, location + "%"); + + rs = ps.executeQuery(); + + while (rs.next()) { + paths.add(new Path(rs.getString(1))); + } + } + } + + deleteAllPartitions(mtbl.id); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return paths; + } + + public boolean dropPartition(String dbname, String tblname, List partvals) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + List colnames = getPartitionColumnNames(mtbl.id); + OraclePartition mpart = getOraclePartition(mtbl, constructPartitionName(colnames, partvals)); + + if (mpart != null) { + ArrayList mparts = new ArrayList(1); + mparts.add(mpart); + + dropPartitions(mparts); + } + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! 
committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public void dropPartitions(String dbname, String tblname, List partnames) throws MetaException, NoSuchObjectException { + LOG.info("partnames=" + partnames); + + if (partnames.isEmpty()) { + return; + } + + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + // TODO: Perhaps this should just be List. + List mparts = getOraclePartitions(mtbl, partnames); + + if (mparts.size() > 0) { + dropPartitions(mparts); + } + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + } + } + + public void alterPartition(String dbname, String tblname, List partvals, Partition part) + throws InvalidObjectException, MetaException { + boolean committed = false; + + PreparedStatement ps = null; + + try { + open(); + + openTransaction(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + List colNames = getPartitionColumnNames(mtbl.id); + OraclePartition mpart = getOraclePartition(mtbl, constructPartitionName(colNames, partvals)); + Partition orig = getPartition(dbname, tblname, partvals); + + int now = (int) (System.currentTimeMillis() / 1000L); + + if (! orig.getValues().equals(part.getValues())) { + LOG.info("Partition name differs orig=" + mpart.name + " new=" + constructPartitionName(colNames, part.getValues())); + mpart.name = constructPartitionName(colNames, part.getValues()); + } + + HashMap origParams = null; + HashMap newParams = null; + + if (! TableType.VIRTUAL_VIEW.name().equals(mtbl.type)) { + if (! orig.getSd().getLocation().equals(part.getSd().getLocation())) { + LOG.info("Location differs orig=" + orig.getSd().getLocation() + " new=" + part.getSd().getLocation()); + mpart.location = part.getSd().getLocation(); + } + + // Check storage descriptor/serde. + StorageDescriptor sd = new StorageDescriptor(part.getSd()); + sd.unsetCols(); + sd.unsetLocation(); + sd.unsetParameters(); + sd.getSerdeInfo().unsetParameters(); + sd.unsetSkewedInfo(); // TODO: Not supporting skewed info. + if (this.storageDescriptors.containsKey(sd)) { + mpart.sdId = this.storageDescriptors.get(sd); + } else { + LOG.info("Storage descriptors differ: orig=" + orig.getSd() + " new=" + part.getSd()); + LOG.info(this.storageDescriptors + " -> " + sd); + + mpart.sdId = createStorageDescriptor(sd); + } + + // Check storage descriptor/serde parameters. + origParams = new HashMap(orig.getSd().getParameters()); + newParams = new HashMap(part.getSd().getParameters()); + + if (! origParams.equals(newParams)) { + LOG.info("Storage descriptor parameters differ: orig=" + origParams + " new=" + newParams); + + mpart.sdParamsId = setStorageDescriptorParams(mtbl.id, newParams); + } + + origParams = new HashMap(orig.getSd().getSerdeInfo().getParameters()); + newParams = new HashMap(part.getSd().getSerdeInfo().getParameters()); + + if (! 
origParams.equals(newParams)) { + LOG.info("Serde parameters differ: orig=" + origParams + " new=" + newParams); + + mpart.serdeParamsId = setSerdeParams(mtbl.id, newParams); + } + } + + // Check partition parameters. + origParams = new HashMap(orig.getParameters()); + newParams = new HashMap(part.getParameters()); + origParams.remove(hive_metastoreConstants.DDL_TIME); + newParams.remove(hive_metastoreConstants.DDL_TIME); + + if (! origParams.equals(newParams)) { + LOG.info("Partition parameters differ orig=" + origParams + " new=" + newParams); + mpart.paramsId = setPartitionParams(mtbl.id, newParams); + } + + ps = this.connection.prepareStatement("UPDATE V2_PARTITIONS SET SD_ID = ?, TBL_PART_PARAM_ID = ?, SD_PARAM_ID = ?, SERDE_PARAM_ID = ?, NAME = ?, LOCATION = ?, LAST_MODIFIED_TIME = ? WHERE PART_ID = ?"); + ps.setLong(1, mpart.sdId); + ps.setLong(2, mpart.paramsId); + ps.setLong(3, mpart.sdParamsId); + ps.setLong(4, mpart.serdeParamsId); + ps.setString(5, mpart.name); + ps.setString(6, mpart.location); + ps.setInt(7, now); + ps.setLong(8, mpart.id); + + int rc = ps.executeUpdate(); + + LOG.info("Updated rows: " + rc); + + committed = commitTransaction(); + } catch (NoSuchObjectException e) { + throw new InvalidObjectException("Partition does not exist."); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + } + + public void alterPartitions(String dbname, String tblname, List> partvals, List parts) + throws InvalidObjectException, MetaException { + if (partvals.size() != parts.size()) { + throw new MetaException("Number of partition values does not equal the number of partitions."); + } + + boolean committed = false; + + PreparedStatement ps = null; + ResultSet rs = null; + + Exception exception = null; + + try { + open(); + + openTransaction(); + + for (int i = 0; i < parts.size(); i++) { + Partition part = parts.get(i); + List partval = partvals.get(i); + + alterPartition(dbname, tblname, partval, part); + } + + committed = commitTransaction(); + } catch (Exception e) { + exception = e; + } finally { + if (! committed) { + rollbackTransaction(); + } + + close(); + + if (exception != null) { + MetaException e = new MetaException("Failure."); + e.initCause(exception); + throw e; + } + } + } + + public boolean doesPartitionExist(String dbname, String tblname, List partvals) + throws MetaException { + // Required by add partition. + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + List names = getPartitionColumnNames(mtbl.id); + + String name = constructPartitionName(names, partvals); + + ps = this.connection.prepareStatement("SELECT COUNT(1) FROM V2_PARTITIONS WHERE TBL_ID = ? 
AND NAME = ?"); + ps.setLong(1, mtbl.id); + ps.setString(2, name); + + rs = ps.executeQuery(); + + int count = -1; + + if (rs.next()) { + count = rs.getInt(1); + } + + return count == 1; + } else { + return false; + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + } + + public Partition getPartition(String dbname, String tblname, List partvals) + throws MetaException, NoSuchObjectException { + Partition partition = null; + + long sdId = 0L; + long cdId = 0L; + long paramsId = 0L; + long sdParamId = 0L; + long serdeParamId = 0L; + String location = null; + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + if (tbl != null) { + List colNames = getPartitionColumnNames(tbl.id); + String name = constructPartitionName(colNames, partvals); + + ps = this.connection.prepareStatement("SELECT SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ? AND NAME = ?"); + ps.setLong(1, tbl.id); + ps.setString(2, name); + + rs = ps.executeQuery(); + + if (rs.next()) { + sdId = rs.getLong(1); + if (rs.wasNull()) { + sdId = 0L; + } + cdId = rs.getLong(2); + if (rs.wasNull()) { + cdId = 0L; + } + paramsId = rs.getLong(3); + if (rs.wasNull()) { + paramsId = 0L; + } + sdParamId = rs.getLong(4); + if (rs.wasNull()) { + sdParamId = 0L; + } + serdeParamId = rs.getLong(5); + if (rs.wasNull()) { + serdeParamId = 0L; + } + location = rs.getString(6); + + partition = new Partition(); + partition.setDbName(dbname); + partition.setTableName(tblname); + partition.setValues(new ArrayList(partvals)); + partition.setCreateTime(rs.getInt(7)); + partition.setLastAccessTime(rs.getInt(9)); + partition.setParameters(new HashMap()); + partition.getParameters().put(hive_metastoreConstants.DDL_TIME, rs.getString(8)); + } else { + throw new NoSuchObjectException("partition values=" + partvals.toString()); + } + + if (partition != null) { + // Fetch storage descriptor. + if (sdId > 0L) { + StorageDescriptor sd = getStorageDescriptor(sdId, cdId, sdParamId, serdeParamId); + sd.setLocation(location); + + partition.setSd(sd); + } + + // Fetch table parameters. + partition.getParameters().putAll(getPartitionParams(paramsId)); + + LOG.info("Retrieved partition: " + partition); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + if (partition == null) { + throw new NoSuchObjectException("partition values=" + partvals.toString()); + } + + return partition; + } + + public List getPartitions(String dbname, String tblname, int limit) + throws MetaException, NoSuchObjectException { + // Reconcile with getPartitionsByNames. + LOG.info("getPartitions: " + dbname + "." + tblname + " limit=" + limit); + + // TODO: Check the max number of partitions to return. + // Refer to MetaStoreDirectSql.getPartitionsViaSqlFilterInternal. 
+ + List partitions = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + if (tbl != null) { + ps = this.connection.prepareStatement("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ?"); + ps.setLong(1, tbl.id); + + partitions = getPartitions(ps, dbname, tblname, -1); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + close(); + } + + return partitions; + } + + public Partition getPartitionWithAuth(String dbname, String tblname, List partvals, String user, + List groups) throws MetaException, NoSuchObjectException, InvalidObjectException { + Table tbl = getTable(dbname, tblname); + Partition part = getPartition(dbname, tblname, partvals); + + if ("TRUE".equalsIgnoreCase(tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + String name = Warehouse.makePartName(tbl.getPartitionKeys(), partvals); + PrincipalPrivilegeSet auth = getPartitionPrivilegeSet(dbname, tblname, name, user, groups); + part.setPrivileges(auth); + } + + return part; + } + + public List getPartitionsWithAuth(String dbname, String tblname, short limit, String user, + List groups) throws MetaException, NoSuchObjectException, InvalidObjectException { + Table tbl = getTable(dbname, tblname); + List parts = getPartitions(dbname, tblname, limit); + + if ("TRUE".equalsIgnoreCase(tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + if (parts != null) { + for (Partition part : parts) { + String name = Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()); + PrincipalPrivilegeSet auth = this.getPartitionPrivilegeSet(dbname, tblname, name, user, groups); + part.setPrivileges(auth); + } + } + } + + return parts; + } + + public List getPartitionsByFilter(String dbname, String tblname, String filter, short limit) + throws MetaException, NoSuchObjectException { + List parts = null; + + LOG.info("dbname=" + dbname + " tblname=" + tblname + " filter=" + filter); + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + Table tbl = getTable(dbname, tblname); + ArrayList params = new ArrayList(); + String where = null; + + if (filter != null && ! 
filter.isEmpty()) { + ExpressionTree tree = OracleStoreUtils.createExpressionTree(filter); + + if (tree != null) { + where = PartitionFilterGenerator.generateSqlFilter(mtbl, tbl, tree, params); + + if (where == null) { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + + LOG.info("filter SQL=" + where + " params=" + params); + } else { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + } + + StringBuilder sb = new StringBuilder("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ?"); + + if (where != null && params.size() > 0) { + sb.append(" AND " + where); + } + + sb.append(" ORDER BY 1"); + + if (limit > 0) { + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + LOG.info(sb.toString()); + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mtbl.id); + + for (int i = 0; i < params.size(); i++) { + ps.setString(i + 2, (String) params.get(i)); + } + + parts = getPartitions(ps, dbname, tblname, -1); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + close(); + } + + return parts; + } + + public boolean getPartitionsByExprNew(String dbname, String tblname, byte[] expr, String defaultPartitionName, + short limit, List result) throws TException { + // TODO: Verify this method is equivalent to calling getPartitionsByFilter. + LOG.info("getPartitionsByExpr: defaultPartitionName=" + defaultPartitionName + " limit=" + limit + " result=" + result); + + assert result != null; + + // We will try pushdown first, so make the filter. This will also validate the expression, + // if serialization fails we will throw incompatible metastore error to the client. + String filter = null; + try { + filter = this.expressionProxy.convertExprToFilter(expr); + LOG.info("filter=" + filter); + } catch (MetaException e) { + // TODO: Fix this exception. + //throw new IMetaStoreClient.IncompatibleMetastoreException(e.getMessage()); + throw e; + } + + AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); + + List partitions = getPartitionsByFilter(dbname, tblname, filter, limit); + + result.addAll(partitions); + + return hasUnknownPartitions.get(); + } + + /** + * @param dbname the name of the database. + * @param tblname the name of the table. + * @param expr Kryo serialized representation of the filter expression. + * @param defaultPartitionName ???. + * @param limit the limit applied to the result set. + * @param result the list of partitions matching the filter. + */ + public boolean getPartitionsByExpr(String dbname, String tblname, byte[] expr, String defaultPartitionName, + short limit, List result) throws TException { + // NOTE: See ObjectStore.getPartitionsByExprInternal + assert result != null; + + // We will try pushdown first, so make the filter. This will also validate the expression, + // if serialization fails we will throw incompatible metastore error to the client. + String filter = null; + try { + filter = this.expressionProxy.convertExprToFilter(expr); + } catch (MetaException e) { + // TODO: Fix this exception. + //throw new IMetaStoreClient.IncompatibleMetastoreException(e.getMessage()); + throw e; + } + + // Make a tree out of the filter. + // TODO: this is all pretty ugly. 
The only reason we need all these transformations + // is to maintain support for simple filters for HCat users that query metastore. + // If forcing everyone to use thick client is out of the question, maybe we could + // parse the filter into standard hive expressions and not all this separate tree + // Filter.g stuff. That way this method and ...ByFilter would just be merged. + ExpressionTree tree = OracleStoreUtils.createExpressionTree(filter); + + LOG.info("defaultPartitionName=" + defaultPartitionName + " filter=" + filter + "tree=" + tree); + + AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = null; + Table tbl = null; + + try { + open(); + + mtbl = getOracleTable(dbname, tblname); + tbl = getTable(dbname, tblname); + + // Original implementation tried direct SQL if an expression tree was available. + // Failing that, it would select all partitions names and filter externally. + // In the presence of a limit, it would attempt to do the same through JDO. + Integer max = (limit < 0) ? null : (int) limit; + + // If we have some sort of expression tree, try SQL filter pushdown. + List parts = null; + + if (tree != null) { + parts = getPartitionsWithFilter(mtbl, tbl, tree, max); + LOG.info("Partitions retrieved by direct query: " + (parts != null ? parts.size() : 0)); + } + + if (parts == null) { + // TODO: this is an expensive fall-back if the filter cannot be parsed. + + LinkedList names = new LinkedList(); + // TODO: limit should be integer not short. + hasUnknownPartitions.set(getPartitionNamesPrunedByExpr(mtbl, tbl, expr, defaultPartitionName, limit, names)); + + LOG.info("limit=" + limit + " maxPartitionsPermitted=" + this.maxPartitionsPermitted); + + if (limit > 0) { + names = new LinkedList(names.subList(0, limit)); + } else if (limit == 0 && names.size() > this.maxPartitionsPermitted) { + LOG.info("Partition limit - verifying if maxAllowed partitions will exceed, " + + "maxPartitionsPermitted:" + this.maxPartitionsPermitted + ", max:" + max); + // TODO: Fix MetaStoreDirectSql.MAXPARTLIMIT_MSGPREFIX. + String message = MAXPARTLIMIT_MSGPREFIX + "maxes out the partition limit (" + + this.maxPartitionsPermitted + "), specify a more restrictive partition predicate"; + LOG.error(message); + throw new MetaException(message); + } + + // TODO: Consider using getPartitionsByIds instead. Limit? + parts = getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), names); + LOG.info("Partitions retrieved by pruning: " + (parts != null ? parts.size() : 0)); + } + + if (parts != null) { + result.addAll(parts); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(); + } + + return hasUnknownPartitions.get(); + } + + /** + * Returns a list of Partitions associated with the specified names. + * @param dbname the database name. + * @param tblname the table name. + * @param partnames a list of partition names. + * @return a list of Partitions. + */ + public List getPartitionsByNames(String dbname, String tblname, List partnames) + throws MetaException, NoSuchObjectException { + LOG.info("getPartitionsByNames: " + dbname + "." + tblname + " names=" + partnames); + + if (partnames.isEmpty()) { + return new ArrayList(0); + } + + // TODO: Check the max number of partitions to return. + // Refer to MetaStoreDirectSql.getPartitionsViaSqlFilterInternal. 
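+ // Each requested name becomes a bind parameter in the IN clause; very large name lists may need batching, since Oracle typically caps IN lists at 1000 elements (see the TODO below).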
+ + int parts = partnames != null ? partnames.size() : 0; + List partitions = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (int i = 0; i < parts; i++) { + sb.append(comma + "?"); + comma = ","; + } + + // TODO: May need to batch IN clause for Oracle. + ps = this.connection.prepareStatement("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ? AND NAME IN (" + sb.toString() + ")"); + ps.setLong(1, tbl.id); + + int n = 2; + + for (String name : partnames) { + ps.setString(n++, name); + } + + partitions = getPartitions(ps, dbname, tblname, parts); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + close(); + } + + return partitions; + } + + public List listPartitionNames(String dbname, String tblname, short limit) + throws MetaException { + ArrayList names = new ArrayList(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + OracleTable tbl = getOracleTable(dbname, tblname); + + if (limit > 0) { + ps = this.connection.prepareStatement("SELECT NAME FROM (SELECT NAME FROM V2_PARTITIONS WHERE TBL_ID = ? ORDER BY NAME) WHERE ROWNUM <= ?"); + ps.setLong(1, tbl.id); + ps.setInt(2, limit); + } else { + ps = this.connection.prepareStatement("SELECT NAME FROM V2_PARTITIONS WHERE TBL_ID = ? ORDER BY NAME"); + ps.setLong(1, tbl.id); + } + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + public List listPartitionNamesByFilter(String dbname, String tblname, String filter, short limit) + throws MetaException { + // TODO: Duplicates code from listPartitionValues + ArrayList partnames = new ArrayList(); + + LOG.info("dbname=" + dbname + " tblname=" + tblname + " filter=" + filter); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + Table tbl = getTable(dbname, tblname); + ArrayList params = new ArrayList(); + String where = null; + + if (filter != null && ! filter.isEmpty()) { + // TODO: Implement filters. 
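+ // The filter string is parsed into an ExpressionTree and translated into a parameterized WHERE fragment; filters that cannot be translated raise UnsupportedOperationException rather than silently matching everything.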
+ ExpressionTree tree = OracleStoreUtils.createExpressionTree(filter); + + if (tree != null) { + where = PartitionFilterGenerator.generateSqlFilter(mtbl, tbl, tree, params); + + if (where == null) { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + + LOG.info("filter SQL=" + where + " params=" + params); + } else { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + } + + StringBuilder sb = new StringBuilder("SELECT V2_PARTITIONS.NAME FROM V2_PARTITIONS WHERE V2_PARTITIONS.TBL_ID = ?"); + + if (where != null && params.size() > 0) { + sb.append(" AND " + where); + } + + sb.append(" ORDER BY 1"); + + if (limit > 0) { + sb.insert(0, "SELECT NAME FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + LOG.info(sb.toString()); + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mtbl.id); + + for (int i = 0; i < params.size(); i++) { + ps.setString(i + 2, (String) params.get(i)); + } + + rs = ps.executeQuery(); + + while (rs.next()) { + partnames.add(rs.getString(1)); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return partnames; + } + + public List listPartitionNamesPs(String dbname, String tblname, List partvals, short limit) + throws MetaException, NoSuchObjectException { + List names = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + List partcols = getPartitionColumns(mtbl.id); + + if (partvals.size() > partcols.size()) { + throw new MetaException("Invalid part spec: partcols=" + partcols.size() + " partvals=" + partvals.size()); + } + + StringBuilder sb = new StringBuilder(); + sb.append("SELECT NAME FROM V2_PARTITIONS WHERE TBL_ID = ? AND NAME LIKE '" + generatePartitionPsCondition(partcols, partvals) + "'"); + + if (limit > 0) { + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mtbl.id); + + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + public List listPartitionsPsWithAuth(String dbname, String tblname, + List partvals, short limit, String user, List groups) + throws MetaException, InvalidObjectException, NoSuchObjectException { + List parts = new ArrayList(); + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + OracleTable mtbl = getOracleTable(dbname, tblname); + Table tbl = getTable(dbname, tblname); + + if (tbl != null) { + List partcols = tbl.getPartitionKeys(); + + if (partvals.size() > partcols.size()) { + throw new MetaException("Invalid part spec: partcols=" + partcols.size() + " partvals=" + partvals.size()); + } + + StringBuilder sb = new StringBuilder(); + sb.append("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ? 
AND NAME LIKE '"); + sb.append(generatePartitionPsCondition(partcols, partvals)); + sb.append("'"); + + if (limit > 0) { + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mtbl.id); + + parts = getPartitions(ps, dbname, tblname, -1); + + if ("TRUE".equalsIgnoreCase(tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE")) && user != null && groups != null) { + for (Partition part : parts) { + String name = Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()); + PrincipalPrivilegeSet auth = getPartitionPrivilegeSet(dbname, tblname, name, user, groups); + part.setPrivileges(auth); + } + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + close(); + } + + return parts; + } + + /* DataDiscovery support */ + public List
<Table> listTablesByQuery(String[] keywords, long limit) throws MetaException {
+    ArrayList<Table> tables = new ArrayList<Table>
(); + + PreparedStatement ps = null; + ResultSet rs = null; + + ArrayList results = new ArrayList(); + Object[] row = new Object[2]; + + try { + open(); + + // TODO: Optimize this to return a list of TBL_IDs. + for (TableSearchSQL query : this.discoveryQueries) { + try { + ps = this.connection.prepareStatement(query.getFinalDirectSQLQuery(keywords)); + rs = ps.executeQuery(); + + while (rs.next()) { + row[0] = rs.getString(1); + row[1] = rs.getString(2); + MetaStoreUtils.addResults(results, row, query.getTag()); + + if (limit > 0 && results.size() >= limit) { + break; + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps, rs); + } + + if (limit > 0 && results.size() >= limit) { + break; + } + } + + if (results.size() > 0) { + for (TableSearchResult result : results) { + Table table = getTable(result.getDatabase(), result.getTable()); + addTagsToTableProperty(result, table); + tables.add(table); + } + } + + LOG.info("Search found matching results: " + tables.size()); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(); + } + + return tables; + } + + public PartitionValuesResponse listPartitionValues(String dbname, String tblname, List cols, + boolean distinct, String filter, boolean ascending, List order, long limit) throws MetaException { + PartitionValuesResponse response = new PartitionValuesResponse(); + + LOG.info("dbname=" + dbname + " tblname=" + tblname + " cols=" + cols + " distinct=" + distinct + " filter=" + filter); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + + if (mtbl != null) { + Table tbl = getTable(dbname, tblname); + ArrayList params = new ArrayList(); + String where = null; + + if (filter != null && ! filter.isEmpty()) { + // TODO: Implement filters. + ExpressionTree tree = OracleStoreUtils.createExpressionTree(filter); + + if (tree != null) { + where = PartitionFilterGenerator.generateSqlFilter(mtbl, tbl, tree, params); + + if (where == null) { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + + LOG.info("filter SQL=" + where + " params=" + params); + } else { + throw new UnsupportedOperationException("Invalid filter: " + filter); + } + } + + List values = generatePartitionKeyValues(tbl.getPartitionKeys(), cols); + + StringBuilder sb = new StringBuilder(distinct ? 
"SELECT DISTINCT " : "SELECT "); + + String comma = ""; + for (String value : values) { + sb.append(comma); + sb.append(value); + comma = ", "; + } + + sb.append(" FROM V2_PARTITIONS WHERE V2_PARTITIONS.TBL_ID = ?"); + + if (where != null && params.size() > 0) { + sb.append(" AND " + where); + } + + sb.append(" ORDER BY 1"); + + if (limit > 0) { + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(limit); + } + + LOG.info(sb.toString()); + + ps = this.connection.prepareStatement(sb.toString()); + ps.setLong(1, mtbl.id); + + for (int i = 0; i < params.size(); i++) { + ps.setString(i + 2, (String) params.get(i)); + } + + rs = ps.executeQuery(); + + response.setPartitionValues(new ArrayList()); + + while (rs.next()) { + PartitionValuesRow row = new PartitionValuesRow(); + + for (int i = 1; i <= values.size(); i++) { + row.addToRow(rs.getString(i)); + } + + response.addToPartitionValues(row); + } + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return response; + } + + /* Index support */ + public boolean addIndex(Index idx) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public boolean dropIndex(String dbname, String tblname, String idxname) throws MetaException { + throw new UnsupportedOperationException(); + } + + public void alterIndex(String dbname, String tblname, String name, Index idx) + throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public Index getIndex(String dbname, String tblname, String idxname) throws MetaException { + throw new UnsupportedOperationException(); + } + + public List getIndexes(String dbname, String tblname, int limit) throws MetaException { + // TODO: Implement properly. + ArrayList indexes = new ArrayList(0); + + return indexes; + } + + public List listIndexNames(String dbname, String tblname, short max) throws MetaException { + // NOTE: Required by Pig. + + // TODO: Implement properly. + ArrayList names = new ArrayList(0); + + return names; + } + + /* Event support */ + public Table markPartitionForEvent(String dbname, String tblname, Map partVals, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + throw new UnsupportedOperationException(); + } + + public boolean isPartitionMarkedForEvent(String dbname, String tblname, Map partname, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + throw new UnsupportedOperationException(); + } + + public long cleanupEvents() { + throw new UnsupportedOperationException(); + } + + /* Role support */ + /** + * Adds a new role if it does not already exist. + * If the role already exists an InvalidObjectException is thrown. + * @param name the name of the role. + * @param owner the owner of the role. + * @return true if the new role was added; otherwise false. + */ + public boolean addRole(String name, String owner) throws InvalidObjectException, MetaException, NoSuchObjectException { + // Required by hcat_server init. + + boolean committed = false; + boolean success = false; + + PreparedStatement ps = null; + + try { + open(); + + if (! 
roleExists(name)) { + openTransaction(); + + long id = getNextRoleId(); + int now = (int) (System.currentTimeMillis() / 1000L); + + ps = this.connection.prepareStatement("INSERT INTO V2_ROLES (ROLE_ID, NAME, OWNER_NAME, CREATION_TIME) VALUES (?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, name); + ps.setString(3, owner); + ps.setInt(4, now); + + LOG.info(ps.toString()); + + int rc = ps.executeUpdate(); + + LOG.info("Added rows: " + rc); + + committed = commitTransaction(); + success = rc == 1; + } else { + throw new InvalidObjectException("Role " + name + " already exists."); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps); + + if (! committed) { + rollbackTransaction(); + } + + close(); + } + + return success; + } + + public boolean removeRole(String name) throws MetaException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + public boolean grantRole(Role role, String user, PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException { + // Required by hcat_server init. + return true; + } + + public boolean revokeRole(Role role, String user, PrincipalType principalType, boolean grantOption) + throws MetaException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + public List listRoleNames() { + throw new UnsupportedOperationException(); + } + + public List listRoles(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listRolesWithGrants(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listRoleMembers(String name) { + throw new UnsupportedOperationException(); + } + + public org.apache.hadoop.hive.metastore.api.Role getRole(String name) throws NoSuchObjectException { + // Required by hcat_server init. 
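+    // Look up the role row (NAME, OWNER_NAME, CREATION_TIME) from V2_ROLES; if no row is
+    // found, the NoSuchObjectException is thrown after the connection has been released.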
+ Role role = null; + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT NAME, OWNER_NAME, CREATION_TIME FROM V2_ROLES WHERE NAME = ?"); + ps.setString(1, name); + + rs = ps.executeQuery(); + + if (rs.next()) { + role = new Role(rs.getString(1), rs.getInt(2), rs.getString(3)); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps, rs); + close(); + } + + if (role == null) { + throw new NoSuchObjectException(name + " role can not be found."); + } + + return role; + } + + /* Privilege support */ + public PrincipalPrivilegeSet getUserPrivilegeSet(String user, List groups) + throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public PrincipalPrivilegeSet getDBPrivilegeSet (String dbname, String user, List groups) + throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public PrincipalPrivilegeSet getTablePrivilegeSet (String dbname, String tblname, String user, List groups) + throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbname, String tblname, String partition, + String user, List groups) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public PrincipalPrivilegeSet getColumnPrivilegeSet (String dbname, String tblname, String partname, + String colname, String user, List groups) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public List listPrincipalGlobalGrants(String principal, PrincipalType type) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_GLOBAL_PRIVILEGES WHERE PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setString(1, principal); + ps.setString(2, type.toString()); + + // TODO: Original implementation does not share this object. + HiveObjectRef ref = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public List listPrincipalDBGrants(String principal, PrincipalType type, String dbname) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + OracleDatabase mdb = getOracleDatabase(dbname); + + if (mdb != null) { + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_DB_PRIVILEGES WHERE DB_ID = ? AND PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setLong(1, mdb.id); + ps.setString(2, principal); + ps.setString(3, type.toString()); + + // TODO: Original implementation does not share this object. 
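+        // A DATABASE-scoped HiveObjectRef is passed to getPrincipalGrants along with the
+        // principal, so the returned grants reference this database.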
+ HiveObjectRef ref = new HiveObjectRef(HiveObjectType.DATABASE, dbname, null, null, null); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public List listAllTableGrants(String principal, PrincipalType type, String dbname, String tblname) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_TBL_PRIVILEGES WHERE TBL_ID = ? AND PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setLong(1, tbl.id); + ps.setString(2, principal); + ps.setString(3, type.toString()); + + // TODO: Original implementation does not share this object. + HiveObjectRef ref = new HiveObjectRef(HiveObjectType.TABLE, dbname, tblname, null, null); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public List listPrincipalPartitionGrants( + String principal, PrincipalType type, String dbname, + String tblname, List partvals, String partname) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_PART_PRIVILEGES WHERE PART_ID = ? AND PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setLong(1, tbl.id); + ps.setString(2, principal); + ps.setString(3, type.toString()); + + // TODO: Original implementation does not share this object. + HiveObjectRef ref = new HiveObjectRef(HiveObjectType.PARTITION, dbname, tblname, partvals, null); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public List listPrincipalTableColumnGrants(String principal, + PrincipalType type, String dbname, String tblname, String colname) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable tbl = getOracleTable(dbname, tblname); + + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_TBL_COL_PRIVILEGES WHERE TBL_ID = ? AND COLUMN_NAME = ? AND PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setLong(1, tbl.id); + ps.setString(2, colname); + ps.setString(3, principal); + ps.setString(4, type.toString()); + + // TODO: Original implementation does not share this object. 
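+      // Table-level column grants use a TABLE-typed ref with only the column name set;
+      // partition-level column grants below use a COLUMN-typed ref with the partition values.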
+ HiveObjectRef ref = new HiveObjectRef(HiveObjectType.TABLE, dbname, tblname, null, colname); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public List listPrincipalPartitionColumnGrants( + String principal, PrincipalType type, String dbname, String tblname, + List partvals, String partname, String colname) { + List privileges = null; + + PreparedStatement ps = null; + + try { + open(); + + dbname = HiveStringUtils.normalizeIdentifier(dbname); + tblname = HiveStringUtils.normalizeIdentifier(tblname); + + OracleTable mtbl = getOracleTable(dbname, tblname); + OraclePartition mpart = getOraclePartition(mtbl, partname); + + ps = this.connection.prepareStatement("SELECT GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME FROM V2_PART_COL_PRIVILEGES WHERE PART_ID = ? AND COLUMN_NAME = ? AND PRINCIPAL_NAME = ? AND PRINCIPAL_TYPE = ?"); + ps.setLong(1, mpart.id); + ps.setString(2, colname); + ps.setString(3, principal); + ps.setString(4, type.toString()); + + // TODO: Original implementation does not share this object. + HiveObjectRef ref = new HiveObjectRef(HiveObjectType.COLUMN, dbname, tblname, partvals, colname); + + privileges = getPrincipalGrants(ps, principal, type, ref); + } catch (SQLException e) { + LOG.error("Failure.", e); + } finally { + close(ps); + close(); + } + + return privileges; + } + + public boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException { + // Required by hcat_server init. + + boolean committed = false; + + int now = (int) (System.currentTimeMillis() / 1000); + + try { + open(); + + openTransaction(); + + List privilegeList = privileges.getPrivileges(); + + if (privilegeList != null && privilegeList.size() > 0) { + HashSet privSet = new HashSet(); + + for (HiveObjectPrivilege privDef : privilegeList) { + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + String grantor = privDef.getGrantInfo().getGrantor(); + String grantorType = privDef.getGrantInfo().getGrantorType().toString(); + boolean grantOption = privDef.getGrantInfo().isGrantOption(); + privSet.clear(); + + if (principalType == PrincipalType.ROLE){ + if (! 
roleExists(userName)) { + throw new NoSuchObjectException("Role " + userName + " does not exist"); + } + } + + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List globalPrivs = listPrincipalGlobalGrants(userName, principalType); + + if (globalPrivs != null && globalPrivs.size() > 0) { + for (HiveObjectPrivilege priv : globalPrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted by " + grantor); + } + + addGlobalPrivilege(userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + OracleDatabase mdb = verifyDatabase(getOracleDatabase(hiveObject.getDbName()), hiveObject.getDbName()); + + if (mdb != null) { + List dbPrivs = listPrincipalDBGrants(userName, principalType, hiveObject.getDbName()); + + if (dbPrivs != null && dbPrivs.size() > 0) { + for (HiveObjectPrivilege priv : dbPrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted on database " + hiveObject.getDbName() + " by " + grantor); + } + + addDbPrivilege(mdb.id, userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + OracleTable mtbl = getOracleTable(hiveObject.getDbName(), hiveObject.getObjectName()); + + if (mtbl != null) { + List tablePrivs = listAllTableGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName()); + + if (tablePrivs != null && tablePrivs.size() > 0) { + for (HiveObjectPrivilege priv : tablePrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted on table [" + hiveObject.getDbName() + "," + hiveObject.getObjectName() + "] by " + grantor); + } + + addTablePrivilege(mtbl.id, userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + OracleTable mtbl = getOracleTable(hiveObject.getDbName(), hiveObject.getObjectName()); + + if (mtbl != null) { + List colNames = getPartitionColumnNames(mtbl.id); + OraclePartition mpart = getOraclePartition(mtbl, constructPartitionName(colNames, hiveObject.getPartValues())); + + if (mpart != null) { + List partPrivs = listPrincipalPartitionGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getPartValues(), mpart.name); + + if (partPrivs != null && partPrivs.size() > 0) { + for (HiveObjectPrivilege priv : partPrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted on partition [" + hiveObject.getDbName() + "," + hiveObject.getObjectName() + "," + mpart.name + "] by " + grantor); + } + + 
addPartitionPrivilege(mpart.id, userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + OracleTable mtbl = getOracleTable(hiveObject.getDbName(), hiveObject.getObjectName()); + + if (mtbl != null) { + if (hiveObject.getPartValues() != null) { + List colNames = getPartitionColumnNames(mtbl.id); + OraclePartition mpart = getOraclePartition(mtbl, constructPartitionName(colNames, hiveObject.getPartValues())); + + if (mpart != null) { + List colPrivs = listPrincipalPartitionColumnGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getPartValues(), mpart.name, hiveObject.getColumnName()); + + if (colPrivs != null && colPrivs.size() > 0) { + for (HiveObjectPrivilege priv : colPrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted on column " + hiveObject.getColumnName() + " [" + hiveObject.getDbName() + "," + hiveObject.getObjectName() + "," + mpart.name + "] by " + grantor); + } + + addPartitionColumnPrivilege(mpart.id, hiveObject.getColumnName(), userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } + } else { + List colPrivs = listPrincipalTableColumnGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); + + if (colPrivs != null && colPrivs.size() > 0) { + for (HiveObjectPrivilege priv : colPrivs) { + if (priv.getGrantInfo().getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getGrantInfo().getPrivilege()); + } + } + } + + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + " is already granted on column " + hiveObject.getColumnName() + " [" + hiveObject.getDbName() + "," + hiveObject.getObjectName() + "] by " + grantor); + } + + addTableColumnPrivilege(mtbl.id, hiveObject.getColumnName(), userName, principalType.toString(), privilege, grantor, grantorType, grantOption, now); + } + } + } + } + } + } + + committed = commitTransaction(); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + if (! 
committed) { + rollbackTransaction(); + } + + close(); + } + + return committed; + } + + public boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) + throws InvalidObjectException, MetaException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + public List listPrincipalDBGrantsAll(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listPrincipalTableGrantsAll(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listPrincipalPartitionGrantsAll(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listPrincipalTableColumnGrantsAll(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listPrincipalPartitionColumnGrantsAll(String name, PrincipalType type) { + throw new UnsupportedOperationException(); + } + + public List listGlobalGrantsAll() { + throw new UnsupportedOperationException(); + } + + public List listDBGrantsAll(String dbname) { + throw new UnsupportedOperationException(); + } + + public List listPartitionColumnGrantsAll(String dbname, String tblname, String partname, String colname) { + throw new UnsupportedOperationException(); + } + + public List listTableGrantsAll(String dbname, String tblname) { + throw new UnsupportedOperationException(); + } + + public List listPartitionGrantsAll(String dbname, String tblname, String partname) { + throw new UnsupportedOperationException(); + } + + public List listTableColumnGrantsAll(String dbname, String tblname, String colname) { + throw new UnsupportedOperationException(); + } + + /* Statistics support */ + public boolean updateTableColumnStatistics(ColumnStatistics stats) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + throw new UnsupportedOperationException(); + } + + public boolean updatePartitionColumnStatistics(ColumnStatistics stats, List partvals) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + throw new UnsupportedOperationException(); + } + + public ColumnStatistics getTableColumnStatistics(String dbname, String tblname, List colname) + throws MetaException, NoSuchObjectException { + LOG.warn("Method is not implemented."); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, dbname, tblname); + + ColumnStatistics stats = new ColumnStatistics(); + stats.setStatsDesc(desc); + stats.setStatsObj(new ArrayList(0)); + + return stats; + } + + public List getPartitionColumnStatistics(String dbname, String tblname, List partnames, List colnames) + throws MetaException, NoSuchObjectException { + LOG.warn("Method is not implemented."); + List stats = new ArrayList(0); + + return stats; + } + + public boolean deletePartitionColumnStatistics(String dbname, String tblname, String partname, List partvals, String colname) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + // TODO: Required by alterTable. + LOG.warn("Method is not implemented."); + return true; + } + + public boolean deleteTableColumnStatistics(String dbname, String tblname, String colname) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + // TODO: Required by alterTable. 
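+    // Column statistics are not persisted by this store, so the delete is a no-op that
+    // reports success to keep alterTable working.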
+ LOG.warn("Method is not implemented."); + return true; + } + + /* Custom Type support (considering as deprecated) */ + public boolean createType(Type type) { + throw new UnsupportedOperationException(); + } + + public Type getType(String name) { + throw new UnsupportedOperationException(); + } + + public boolean dropType(String name) { + throw new UnsupportedOperationException(); + } + + /* Delegation Token support */ + public boolean addToken(String id, String token) { + throw new UnsupportedOperationException(); + } + + public boolean removeToken(String id) { + throw new UnsupportedOperationException(); + } + + public String getToken(String id) { + throw new UnsupportedOperationException(); + } + + public List getAllTokenIdentifiers() { + throw new UnsupportedOperationException(); + } + + public int addMasterKey(String key) throws MetaException { + throw new UnsupportedOperationException(); + } + + public void updateMasterKey(Integer seqnum, String key) throws NoSuchObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public boolean removeMasterKey(Integer seqnum) { + throw new UnsupportedOperationException(); + } + + public String[] getMasterKeys() { + throw new UnsupportedOperationException(); + } + + public void verifySchema() throws MetaException { + // Required by hcat_server init. + // Check that the schema is compatible. + } + + public String getMetaStoreSchemaVersion() throws MetaException { + throw new UnsupportedOperationException(); + } + + public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { + throw new UnsupportedOperationException(); + } + + /* Function support */ + public void createFunction(Function func) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public void dropFunction(String dbname, String funcname) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + throw new UnsupportedOperationException(); + } + + public void alterFunction(String dbname, String funcname, Function newFunction) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + public Function getFunction(String dbname, String funcname) throws MetaException { + // Required by cli init. + Function func = null; + + String name = HiveStringUtils.normalizeIdentifier(dbname); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + long id = 0L; + + ps = this.connection.prepareStatement("SELECT A.FUNC_ID, A.NAME, A.TYPE, A.CLASS_NAME, A.OWNER_NAME, A.OWNER_TYPE, A.CREATION_TIME FROM V2_FUNCS A JOIN V2_DBS B ON (A.DB_ID = B.DB_ID) WHERE A.NAME = ? AND B.NAME = ?"); + ps.setString(1, funcname); + ps.setString(2, name); + + rs = ps.executeQuery(); + + if (rs.next()) { + id = rs.getLong(1); + + func = new Function(); + func.setFunctionName(rs.getString(2)); + func.setDbName(dbname); + func.setClassName(rs.getString(4)); + func.setOwnerName(rs.getString(5)); + func.setOwnerType(getPrincipalTypeFromString(rs.getString(6))); + func.setCreateTime(rs.getInt(7)); + func.setFunctionType(FunctionType.findByValue(rs.getInt(3))); + } + + if (id > 0L) { + func.setResourceUris(getFunctionResourceUris(id)); + } + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return func; + } + + public List getFunctions(String dbname, String pattern) throws MetaException { + // Required by cli init. 
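+    // Fetch all function names for the database ordered by name, then apply the '*' / '|'
+    // glob pattern in memory via filterByPattern.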
+ ArrayList names = new ArrayList(); + + LOG.info("db: " + dbname + " pattern: " + pattern); + + String name = HiveStringUtils.normalizeIdentifier(dbname); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT A.NAME FROM V2_FUNCS A JOIN V2_DBS B ON (A.DB_ID = B.DB_ID) WHERE B.NAME = ? ORDER BY A.NAME ASC"); + ps.setString(1, name); + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(1)); + } + + // Apply the filter pattern. + filterByPattern(pattern, names); + } catch (SQLException e) { + LOG.error("Failure.", e); + throw new MetaException(e.getMessage()); + } finally { + close(ps, rs); + close(); + } + + return names; + } + + public AggrStats get_aggr_stats_for(String dbname, String tblname, List partnames, List colnames) + throws MetaException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + public NotificationEventResponse getNextNotification(NotificationEventRequest req) { + throw new UnsupportedOperationException(); + } + + public void addNotificationEvent(NotificationEvent event) { + throw new UnsupportedOperationException(); + } + + public void cleanNotificationEvents(int olderThan) { + throw new UnsupportedOperationException(); + } + + public CurrentNotificationEventId getCurrentNotificationEventId() { + throw new UnsupportedOperationException(); + } + + private PrincipalType getPrincipalTypeFromString(String type) { + return type == null ? null : PrincipalType.valueOf(type); + } + + private Connection open() throws SQLException { + if (this.connection == null) { + this.connection = this.source.getConnection(); + } + + this.openConnectionCount++; + + LOG.info("Open connection: connection=" + System.identityHashCode(this.connection) + " thread_id=" + Thread.currentThread().getId() + " thread_name=" + Thread.currentThread().getName()); + if (this.pool != null) { + LOG.info("Connection pool: active=" + this.pool.getNumActive() + " idle=" + this.pool.getNumIdle()); + } + + return this.connection; + } + + private void close() { + if (this.connection != null) { + this.openConnectionCount--; + + if (this.openConnectionCount == 0) { + try { + LOG.info("Close connection: connection=" + System.identityHashCode(this.connection) + " thread_id=" + Thread.currentThread().getId() + " thread_name=" + Thread.currentThread().getName()); + this.connection.close(); + } catch(SQLException e) { + LOG.error("Unable to close Connection. 
May result in leak.", e); + } finally { + this.connection = null; + } + + if (this.pool != null) { + LOG.info("Connection pool: active=" + this.pool.getNumActive() + " idle=" + this.pool.getNumIdle()); + } + } + } + } + + private void close(PreparedStatement ps) { + if (ps != null) { + try { + ps.close(); + } catch (SQLException e) { + LOG.error("Unable to close PreparedStatement.", e); + } + } + } + + private void close(ResultSet rs) { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Unable to close ResultSet.", e); + } + } + } + + private void close(PreparedStatement ps, ResultSet rs) { + close(rs); + close(ps); + } + + private String constructPartitionName(List names, List values) throws MetaException { + return Warehouse.makePartNameFromColumnNames(names, values, null); + } + + private boolean isValidPartition(Partition part, boolean ifNotExists) throws MetaException { + MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); + + boolean doesExist = doesPartitionExist(part.getDbName(), part.getTableName(), part.getValues()); + if (doesExist && ! ifNotExists) { + throw new MetaException("Partition already exists: " + part); + } + + return ! doesExist; + } + + /** + * Indicates whether the specified pattern is star or all-inclusive. + * @param pattern a (modified) Java regex used to filter lists of strings. + * @return true if a star pattern; otherwise false. + */ + private boolean isStarPattern(String pattern) { + return "*".equals(pattern); + } + + /** + * Removes strings from the list which do not match the specified pattern. + * This method works on the original list and returns the original list. + * The pattern can contain multiple individual sub-patterns separated by |; + * each sub-pattern will be considered separately. + * * is a valid pattern which is mapped to .*. + * @param pattern a (modified) Java regex pattern used to filter the list. + * @param names a list of strings. + * @return post-filtered names list. + */ + private List filterByPattern(String pattern, List names) { + if (pattern != null && ! isStarPattern(pattern)) { + // There may be multiple patterns separated by |. + String[] parts = pattern.trim().split("\\|"); + Pattern[] regexes = new Pattern[parts.length]; + int i = 0; + + for (String part : parts) { + part = "(?i)" + part.replaceAll("\\*", ".*"); + regexes[i++] = Pattern.compile(part); + } + + LOG.info("Patterns: " + regexes); + + Iterator iter = names.iterator(); + + while (iter.hasNext()) { + boolean remove = true; + + for (Pattern regex : regexes) { + Matcher m = regex.matcher(iter.next()); + + if (m.matches()) { + remove = false; + break; + } + } + + if (remove) { + iter.remove(); + } + } + } + + return names; + } + + /** + * Returns the next value in the specified sequence. + * @param sequence the name of the sequence. + * @return the next value in the specified sequence. 
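+   * @throws SQLException if the sequence value cannot be retrieved.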
+ */ + private long getNextSequence(String sequence) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + + long id = 0L; + + try { + ps = this.connection.prepareStatement("SELECT " + sequence + ".NEXTVAL FROM DUAL"); + rs = ps.executeQuery(); + + if (rs.next()) { + id = rs.getLong(1); + } + } finally { + close(ps, rs); + } + + return id; + } + + private long getNextDatabaseId() throws SQLException { + return getNextSequence("V2_DBS_SEQ"); + } + + private long getNextTableId() throws SQLException { + return getNextSequence("V2_TBLS_SEQ"); + } + + private long getNextPartitionId() throws SQLException { + return getNextSequence("V2_PARTITIONS_SEQ"); + } + + private long getNextTablePartitionParamsId() throws SQLException { + return getNextSequence("V2_TBL_PART_PARAMS_SEQ"); + } + + private long getNextTableParamsId() throws SQLException { + return getNextTablePartitionParamsId(); + } + + private long getNextPartitionParamsId() throws SQLException { + return getNextTablePartitionParamsId(); + } + + private long getNextStorageDescriptorId() throws SQLException { + return getNextSequence("V2_SDS_SEQ"); + } + + private long getNextColumnDescriptorId() throws SQLException { + return getNextSequence("V2_CDS_SEQ"); + } + + private long getNextStorageDescriptorParamsId() throws SQLException { + return getNextSequence("V2_SD_PARAMS_SEQ"); + } + + private long getNextSerdeParamsId() throws SQLException { + return getNextSequence("V2_SERDE_PARAMS_SEQ"); + } + + private long getNextRoleId() throws SQLException { + return getNextSequence("V2_ROLES_SEQ"); + } + + private long getNextTypeId() throws SQLException { + return getNextSequence("V2_TYPES_SEQ"); + } + + /** + * Checks whether id represents a valid database; otherwise throws an Exception. + * @param id the id of the database. + * @param name the name of the database. + * @return the specified id. + * @throws NoSuchObjectException + */ + private OracleDatabase verifyDatabase(OracleDatabase mdb, String name) throws NoSuchObjectException { + if (mdb == null) { + // TODO: Message is different from ObjectStore. + throw new NoSuchObjectException("Database '" + name + "' does not exist."); + } + + return mdb; + } + + /** + * Checks whether table represents a valid table; otherwise throws an Exception. + * @param table an OracleTable object as returned by getOracleTable. + * @param dbname the name of the database. + * @param tblname the name of the table. + * @return the specified id. + * @throws InvalidObjectException + * @see getOracleTable + */ + private OracleTable verifyTable(OracleTable mtbl, String dbname, String tblname) throws InvalidObjectException { + if (mtbl == null || mtbl.id == 0L) { + // TODO: Message is different from ObjactStore. + throw new InvalidObjectException("Table '" + dbname + "." + tblname + "' does not exist"); + } + + return mtbl; + } + + /** + * Returns a light-weight meta-object for the specified database or null if it does not exist. + * @param dbname the database name. + * @return a meta-object for the database or null if it does not exist. + */ + private OracleDatabase getOracleDatabase(String dbname) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + OracleDatabase db = null; + + try { + LOG.info("query=SELECT DB_ID, NAME, LOCATION FROM V2_DBS WHERE NAME = ? 
params=[" + dbname + "]"); + + ps = this.connection.prepareStatement("SELECT DB_ID, NAME, LOCATION FROM V2_DBS WHERE NAME = ?"); + ps.setString(1, dbname); + + rs = ps.executeQuery(); + + if (rs.next()) { + db = new OracleDatabase(rs.getLong(1), rs.getString(2), rs.getString(3)); + } + } finally { + close(ps, rs); + } + + return db; + } + + /** + * Returns a light-weight meta-object for the specified table or null if it does not exist. + * @param mdb the meta-object for the database. + * @param tblname the table name. + * @return a meta-object for the table or null if it does not exist. + */ + private OracleTable getOracleTable(OracleDatabase mdb, String tblname) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + OracleTable mtbl = null; + + try { + LOG.info("mdb=" + mdb); + + if (mdb != null) { + LOG.info("query=SELECT TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, TYPE, LOCATION FROM V2_TBLS WHERE DB_ID = ? AND NAME = ? params=[" + mdb.id + ", " + tblname + "]"); + + ps = this.connection.prepareStatement("SELECT TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, TYPE, LOCATION FROM V2_TBLS WHERE DB_ID = ? AND NAME = ?"); + ps.setLong(1, mdb.id); + ps.setString(2, tblname); + + rs = ps.executeQuery(); + + if (rs.next()) { + long id = 0L; + mtbl = new OracleTable(rs.getLong(1), mdb.id, rs.getLong(2), rs.getLong(3), rs.getString(7), rs.getString(8), rs.getString(9)); + id = rs.getLong(4); + mtbl.paramsId = rs.wasNull() ? 0L : id; + id = rs.getLong(5); + mtbl.sdParamsId = rs.wasNull() ? 0L : id; + id = rs.getLong(6); + mtbl.serdeParamsId = rs.wasNull() ? 0L : id; + } + } + } finally { + close(ps, rs); + } + + return mtbl; + } + + /** + * Returns a light-weight meta-object for the specified table or null if it does not exist. + * @param dbname the database name. + * @param tblname the table name. + * @return a meta-object for the table or null if it does not exist. + */ + private OracleTable getOracleTable(String dbname, String tblname) throws SQLException { + return getOracleTable(getOracleDatabase(dbname), tblname); + } + + /** + * Returns a light-weight meta-object for the specified partition or null if it does not exist. + * @param mtbl the meta-object for the table. + * @param partname the partition name. + * @return a meta-object for the partition or null if it does not exist. + */ + private OraclePartition getOraclePartition(OracleTable mtbl, String partname) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + OraclePartition mpart = null; + + try { + if (mtbl != null) { + ps = this.connection.prepareStatement("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ? AND NAME = ?"); + ps.setLong(1, mtbl.id); + ps.setString(2, partname); + + rs = ps.executeQuery(); + + if (rs.next()) { + long id = 0L; + + mpart = new OraclePartition(rs.getLong(1), rs.getLong(2), rs.getLong(3), rs.getLong(4), rs.getString(8), rs.getString(9), rs.getInt(10), rs.getString(11), rs.getInt(12)); + id = rs.getLong(5); + mpart.paramsId = rs.wasNull() ? 0L : id; + id = rs.getLong(6); + mpart.sdParamsId = rs.wasNull() ? 0L : id; + id = rs.getLong(7); + mpart.serdeParamsId = rs.wasNull() ? 0L : id; + } + } + } finally { + close(ps, rs); + } + + return mpart; + } + + /** + * Returns a light-weight meta-object for the specified partition or null if it does not exist. 
+ * @param dbname the database name. + * @param tblname the table name. + * @param partname the partition name. + * @return a meta-object for the partition or null if it does not exist. + */ + private OraclePartition getOraclePartition(String dbname, String tblname, String partname) throws SQLException { + return getOraclePartition(getOracleTable(dbname, tblname), partname); + } + + /** + * Returns a list of light-weight meta-object for the specified partition matching the specified names. + * @param mtbl the meta-object for the table. + * @param partname the partition name. + * @return a meta-object for the partition or null if it does not exist. + */ + private List getOraclePartitions(OracleTable mtbl, List partnames) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + ArrayList mparts = new ArrayList(); + + try { + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (String name : partnames) { + sb.append(comma + "?"); + comma = ","; + } + + ps = this.connection.prepareStatement("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE TBL_ID = ? AND NAME IN (" + sb.toString() + ")"); + ps.setLong(1, mtbl.id); + + int idx = 2; + for (String name : partnames) { + ps.setString(idx++, name); + } + + rs = ps.executeQuery(); + + while (rs.next()) { + long id = 0L; + OraclePartition mpart = new OraclePartition(rs.getLong(1), rs.getLong(2), rs.getLong(3), rs.getLong(4), rs.getString(8), rs.getString(9), rs.getInt(10), rs.getString(11), rs.getInt(12)); + id = rs.getLong(5); + mpart.paramsId = rs.wasNull() ? 0L : id; + id = rs.getLong(6); + mpart.sdParamsId = rs.wasNull() ? 0L : id; + id = rs.getLong(7); + mpart.serdeParamsId = rs.wasNull() ? 0L : id; + + mparts.add(mpart); + } + } finally { + close(ps, rs); + } + + return mparts; + } + + /** + * Deletes all of the parameters for the specified entity. + * @param ps the PreparedStatement used to delete the parameters. + * @return the number of rows affected. + */ + private int deleteParams(PreparedStatement ps) throws SQLException { + return ps.executeUpdate(); + } + + private int deleteDatabaseParams(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("DELETE FROM V2_DB_PARAMS WHERE DB_ID = ?"); + ps.setLong(1, id); + + try { + return deleteParams(ps); + } finally { + close(ps); + } + } + + // TODO: should never be called + private int deleteTableParams(long id) throws SQLException { + // TODO: V2_TBL_PARAMS no longer exists. + PreparedStatement ps = this.connection.prepareStatement("DELETE FROM V2_TBL_PARAMS WHERE TBL_ID = ?"); + ps.setLong(1, id); + + try { + return deleteParams(ps); + } finally { + close(ps); + } + } + + // TODO: should never be called + private int deletePartitionParams(long id) throws SQLException { + // TODO: V2_PARTITION_PARAMS no longer exists. + PreparedStatement ps = this.connection.prepareStatement("DELETE FROM V2_PARTITION_PARAMS WHERE PART_ID = ?"); + ps.setLong(1, id); + + try { + return deleteParams(ps); + } finally { + close(ps); + } + } + + /** + * Returns a new Map containing the parameters for the specified entity. + * @param ps the PreparedStatement used to retrieve the parameters. + * @return a new Map containing the parameters for the specified entity; never null. 
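+   * @throws SQLException if the parameter query fails.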
+ */ + private Map getParams(PreparedStatement ps) throws SQLException { + ResultSet rs = null; + + HashMap params = new HashMap(); + + try { + rs = ps.executeQuery(); + + while (rs.next()) { + params.put(rs.getString(1), rs.getString(2)); + } + + return params; + } finally { + close(rs); + } + } + + private Map getDatabaseParams(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT NAME, VALUE FROM V2_DB_PARAMS WHERE DB_ID = ?"); + ps.setLong(1, id); + + try { + return getParams(ps); + } finally { + close(ps); + } + } + + private Map getTablePartitionParams(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT NAME, VALUE FROM V2_TBL_PART_PARAMS WHERE TBL_PART_PARAM_ID = ?"); + ps.setLong(1, id); + + try { + return getParams(ps); + } finally { + close(ps); + } + } + + /** + * @see getPartitionParams + */ + private Map getTableParams(long id) throws SQLException { + return getTablePartitionParams(id); + } + + /** + * @see getTableParams + */ + private Map getPartitionParams(long id) throws SQLException { + return getTablePartitionParams(id); + } + + private Map getStorageDescriptorParams(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT NAME, VALUE FROM V2_SD_PARAMS WHERE SD_PARAM_ID = ?"); + ps.setLong(1, id); + + try { + return getParams(ps); + } finally { + close(ps); + } + } + + private Map getSerdeParams(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT NAME, VALUE FROM V2_SERDE_PARAMS WHERE SERDE_PARAM_ID = ?"); + ps.setLong(1, id); + + try { + return getParams(ps); + } finally { + close(ps); + } + } + + /** + * Sets the parameters of the specified entity. + * @param ps the PreparedStatement used to set the parameters. + * @param params the map of parameters name-value pairs. + * @return the number of rows affected. + */ + private int setParams(PreparedStatement ps, Map params) throws SQLException { + int rc = 0; + + // TODO: Convert to batch insert. 
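+    // Positions 1 (NAME) and 2 (VALUE) are rebound for every entry; the owning-id columns
+    // were bound once by the caller before this loop.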
+ for (Map.Entry entry : params.entrySet()) { + ps.setString(1, entry.getKey()); + ps.setString(2, entry.getValue()); + + rc += ps.executeUpdate(); + } + + return rc; + } + + private int setDatabaseParams(long id, Map params) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_DB_PARAMS (NAME, VALUE, DB_ID) VALUES (?, ?, ?)"); + ps.setLong(3, id); + + try { + return setParams(ps, params); + } finally { + close(ps); + } + } + + private long setTablePartitionParams(long tblId, long paramsId, Map params) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_TBL_PART_PARAMS (NAME, VALUE, TBL_ID, TBL_PART_PARAM_ID) VALUES (?, ?, ?, ?)"); + ps.setLong(3, tblId); + ps.setLong(4, paramsId); + + try { + setParams(ps, params); + } finally { + close(ps); + } + + return paramsId; + } + + /** + * @see setPartitionParams + */ + private long setTableParams(long tblId, Map params) throws SQLException { + long id = getNextTableParamsId(); + + return setTableParams(tblId, id, params); + } + + private long setTableParams(long tblId, long paramsId, Map params) throws SQLException { + return setTablePartitionParams(tblId, paramsId, params); + } + + /** + * @see setTableParams + */ + private long setPartitionParams(long tblId, Map params) throws SQLException { + long id = getNextTableParamsId(); + + return setTablePartitionParams(tblId, id, params); + } + + private long setStorageDescriptorParams(long tblId, Map params) throws SQLException { + long id = getNextStorageDescriptorParamsId(); + + return setStorageDescriptorParams(tblId, id, params); + } + + private long setStorageDescriptorParams(long tblId, long sdParamsId, Map params) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_SD_PARAMS (NAME, VALUE, TBL_ID, SD_PARAM_ID) VALUES (?, ?, ?, ?)"); + ps.setLong(3, tblId); + ps.setLong(4, sdParamsId); + + try { + setParams(ps, params); + } finally { + close(ps); + } + + return sdParamsId; + } + + private long setSerdeParams(long tblId, Map params) throws SQLException { + long id = getNextSerdeParamsId(); + + return setSerdeParams(tblId, id, params); + } + + private long setSerdeParams(long tblId, long serdeParamsId, Map params) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_SERDE_PARAMS (NAME, VALUE, TBL_ID, SERDE_PARAM_ID) VALUES (?, ?, ?, ?)"); + ps.setLong(3, tblId); + ps.setLong(4, serdeParamsId); + + try { + setParams(ps, params); + } finally { + close(ps); + } + + return serdeParamsId; + } + + /** + * Creates a set of columns associated with the specified entity. + * @param ps the PreparedStatement used to create the columns. + * @param cols the list of FieldSchema columns. + */ + private void createColumns(PreparedStatement ps, List cols) throws SQLException { + int position = 0; + + // TODO: Convert to batch insert. + for (FieldSchema col : cols) { + // All column names must be normalized. + ps.setString(2, HiveStringUtils.normalizeIdentifier(col.getName())); + ps.setString(3, col.getType()); + ps.setInt(4, position++); + if (col.getComment() != null) { + ps.setString(5, col.getComment()); + } else { + ps.setNull(5, Types.VARCHAR); + } + + int rc = ps.executeUpdate(); + + LOG.info("Added rows: " + rc); + } + } + + /** + * Creates a set of partition columns for the specified table. + * Partition columns are immutable and will not change over the lifetime of the table. + * @param tblId the id of the table. 
+ * @param cols the list of FieldSchema columns. + */ + private void createPartitionColumns(long tblId, List cols) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_PARTITION_COLS (TBL_ID, NAME, TYPE, POSITION, \"COMMENT\") VALUES (?, ?, ?, ?, ?)"); + ps.setLong(1, tblId); + + try { + createColumns(ps, cols); + } finally { + close(ps); + } + } + + private void deletePartitionColumns(long tblId) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("DELETE FROM V2_PARTITION_COLS WHERE TBL_ID = ?"); + ps.setLong(1, tblId); + + try { + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + /** + * Creates a set of table columns for the specified table. + * Table columns are mutable and can change over the lifetime of the table. + * The column descriptor tracks the version history of the table columns. + * @param tblId the id of the table. + * @param cols the list of FieldSchema columns. + */ + private long createTableColumns(long tblId, List cols) throws SQLException { + long id = getNextColumnDescriptorId(); + + return createTableColumns(tblId, id, cols); + } + + /** + * Creates a set of table columns for the specified table. + * Table columns are mutable and can change over the lifetime of the table. + * The column descriptor tracks the version history of the table columns. + * @param tblId the id of the table. + * @param cdId the id of the current column descriptors. + * @param cols the list of FieldSchema columns. + */ + private long createTableColumns(long tblId, long cdId, List cols) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("INSERT INTO V2_TBL_COLS (TBL_ID, NAME, TYPE, POSITION, \"COMMENT\", CD_ID) VALUES (?, ?, ?, ?, ?, ?)"); + ps.setLong(1, tblId); + ps.setLong(6, cdId); + + try { + createColumns(ps, cols); + } finally { + close(ps); + } + + return cdId; + } + + /** + * Returns the list of columns for the speficied entity. + * @param ps the PreparedStatement used to get the columns. + * @return the list of columns for the specified entity. + */ + private List getColumns(PreparedStatement ps) throws SQLException { + ArrayList cols = new ArrayList(); + + ResultSet rs = null; + + try { + rs = ps.executeQuery(); + + while (rs.next()) { + FieldSchema schema = new FieldSchema(); + schema.setName(rs.getString(2)); + schema.setType(rs.getString(3)); + String comment = rs.getString(4); + if (! rs.wasNull()) { + schema.setComment(comment); + } + + cols.add(schema); + } + } finally { + close(rs); + } + + return cols; + } + + /** + * Returns a list of FieldSchemas for the table columns of the specified table. + * @param id the table id. + * @return a list of FieldSchemas for the table columns of the specified table. + */ + private List getTableColumns(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT POSITION, NAME, TYPE, \"COMMENT\" FROM V2_TBL_COLS WHERE CD_ID = ? ORDER BY POSITION ASC"); + ps.setLong(1, id); + + try { + return getColumns(ps); + } finally { + close(ps); + } + } + + /** + * Returns a list of FieldSchemas for the partition columns of the specified table. + * @param id the table id. + * @return a list of FieldSchemas for the partition columns of the specified table. + */ + private List getPartitionColumns(long id) throws SQLException { + PreparedStatement ps = this.connection.prepareStatement("SELECT POSITION, NAME, TYPE, \"COMMENT\" FROM V2_PARTITION_COLS WHERE TBL_ID = ? 
ORDER BY POSITION ASC"); + ps.setLong(1, id); + + try { + return getColumns(ps); + } finally { + close(ps); + } + } + + /** + * Returns a list of partition column names for the specified table. + * @param id the table id. + * @return a list of partition column names for the specified table. + */ + private List getPartitionColumnNames(long id) throws SQLException { + // TODO: Consider lazy loading this into OracleTable. + ArrayList names = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + ps = this.connection.prepareStatement("SELECT POSITION, NAME FROM V2_PARTITION_COLS WHERE TBL_ID = ? ORDER BY POSITION ASC"); + ps.setLong(1, id); + rs = ps.executeQuery(); + + while (rs.next()) { + names.add(rs.getString(2)); + } + } finally { + close(ps, rs); + } + + return names; + } + + /** + * Returns a list of Partitions. + * @param ps the PreparedStatement used to retrieve the partition data. + * @param dbname the database name. + * @param tblname the table name. + * @param parts the number of partitions expected. + * @return a list of Partitions. + */ + private List getPartitions(PreparedStatement ps, String dbname, String tblname, int parts) throws MetaException, SQLException { + // TODO: Consider breaking into getOraclePartitions(PreparedStatement, String, String, int) and getPartitions(List). + ArrayList partitions = null; + ArrayList infos = null; + + if (parts > 0) { + partitions = new ArrayList(parts); + infos = new ArrayList(parts); + } else { + partitions = new ArrayList(); + infos = new ArrayList(); + } + + ResultSet rs = null; + + try { + rs = ps.executeQuery(); + + while (rs.next()) { + long id = 0L; + long sdId = rs.getLong(3); + if (rs.wasNull()) { + sdId = 0L; + } + long cdId = rs.getLong(4); + if (rs.wasNull()) { + cdId = 0L; + } + String location = rs.getString(9); + if (rs.wasNull()) { + location = null; + } + OraclePartition info = new OraclePartition(rs.getLong(1), rs.getLong(2), sdId, cdId, + rs.getString(8), location, rs.getInt(10), rs.getString(11), rs.getInt(12)); + id = rs.getLong(5); + info.paramsId = rs.wasNull() ? 0L : id; + id = rs.getLong(6); + info.sdParamsId = rs.wasNull() ? 0L : id; + id = rs.getLong(7); + info.serdeParamsId = rs.wasNull() ? 0L : id; + + Partition partition = new Partition(); + partition.setValues(Warehouse.getPartValuesFromPartName(info.name)); + partition.setDbName(dbname); + partition.setTableName(tblname); + partition.setCreateTime(info.creationTime); + partition.setLastAccessTime(info.lastAccessTime); + partition.setParameters(new HashMap()); + partition.getParameters().put(hive_metastoreConstants.DDL_TIME, info.lastModifiedTime); + + infos.add(info); + partitions.add(partition); + } + } finally { + close(rs); + } + + if (infos.size() > 0) { + HashMap sds = new HashMap(); + HashMap> cds = new HashMap>(); + HashMap> params = new HashMap>(); + HashMap> sdParams = new HashMap>(); + HashMap> serdeParams = new HashMap>(); + + sdParams.put(0L, EMPTY_PARAMETERS); + serdeParams.put(0L, EMPTY_PARAMETERS); + + for (int i = 0; i < infos.size(); i++) { + OraclePartition info = infos.get(i); + Partition partition = partitions.get(i); + + // Fetch storage descriptor. + if (! sds.containsKey(info.sdId)) { + sds.put(info.sdId, getStorageDescriptor(info.sdId)); + } + + // Fetch columns. + if (! cds.containsKey(info.cdId)) { + cds.put(info.cdId, getTableColumns(info.cdId)); + } + + // Fetch storage descriptor params. + if (! 
sdParams.containsKey(info.sdParamsId)) { + sdParams.put(info.sdParamsId, getStorageDescriptorParams(info.sdParamsId)); + } + + // Fetch serde params. + if (! serdeParams.containsKey(info.serdeParamsId)) { + serdeParams.put(info.serdeParamsId, getSerdeParams(info.serdeParamsId)); + } + + if (info.sdId > 0L) { + StorageDescriptor sd = new StorageDescriptor(sds.get(info.sdId)); + sd.setCols(cds.get(info.cdId)); + sd.setLocation(info.location); + sd.setParameters(sdParams.get(info.sdParamsId)); + sd.getSerdeInfo().setParameters(serdeParams.get(info.serdeParamsId)); + // TODO: Replace with a common, empty SkewedInfo object. + sd.setSkewedInfo(new SkewedInfo(new ArrayList(0), new ArrayList>(0), new HashMap, String>(0))); + + partition.setSd(sd); + } + + // Fetch partition parameters. + if (! params.containsKey(info.paramsId)) { + params.put(info.paramsId, getPartitionParams(info.paramsId)); + } + + partition.getParameters().putAll(params.get(info.paramsId)); + } + } + + return partitions; + } + + /** + * Returns a list of Partitions associated with the specified ids. + * @param tbl the table. + * @param ids a list of partition ids. + * @return a list of Partitions. + */ + private List getPartitionsByIds(Table tbl, List ids) throws MetaException, SQLException { + // TODO: Deprecate method if not used. + int parts = ids != null ? ids.size() : 0; + List partitions = null; + + PreparedStatement ps = null; + + try { + StringBuilder sb = new StringBuilder(); + sb.append("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS WHERE PART_ID IN ("); + + String comma = ""; + for (Long id : ids) { + sb.append(comma); + sb.append(id); + comma = ","; + } + + sb.append(")"); + + LOG.info("Query: " + sb.toString()); + + // TODO: May need to batch IN clause for Oracle. + ps = this.connection.prepareStatement(sb.toString()); + + partitions = getPartitions(ps, tbl.getDbName(), tbl.getTableName(), parts); + } finally { + close(ps); + } + + return partitions; + } + + /** + * Returns a list of Partitions matching the expression tree conditions. + * @param mtbl the table metadata. + * @param tbl the table. + * @param tree the expression tree containing the selection conditions. + * @param max the limit on the number of results to return. + * @return a list of Partitions. + */ + private List getPartitionsWithFilter(OracleTable mtbl, Table tbl, ExpressionTree tree, + Integer max) throws SQLException, MetaException { + // See MetaStoreDirectSQL.getPartitionsViaSqlFilter + assert tree != null; + + List params = new ArrayList(); + + // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround. + String filter = PartitionFilterGenerator.generateSqlFilter(mtbl, tbl, tree, params); + + if (filter == null) { + return null; // Cannot make SQL filter to push down. + } + + // See MetaStoreDirectSQL.getPartitionsViaSqlFilterInternal + + StringBuilder sb = new StringBuilder(); + sb.append("SELECT PART_ID, TBL_ID, SD_ID, CD_ID, TBL_PART_PARAM_ID, SD_PARAM_ID, SERDE_PARAM_ID, NAME, LOCATION, CREATION_TIME, LAST_MODIFIED_TIME, LAST_ACCESS_TIME FROM V2_PARTITIONS"); + sb.append(" WHERE TBL_ID = " + mtbl.id); + if (! StringUtils.isBlank(filter)) { + sb.append(" AND "); + sb.append(filter); + } + + // We have to be mindful of order during filtering if we are not returning all partitions. 
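+ // Oracle assigns ROWNUM before ORDER BY is applied, so a row limit can only be
+ // combined with ordering by wrapping the ordered query in an inline view, e.g.
+ //   SELECT * FROM (SELECT ... ORDER BY NAME ASC) WHERE ROWNUM <= :limit
+ // which is the shape built below.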
+ if (max != null && max != -1 && max != 0) { + sb.append(" ORDER BY NAME ASC"); + } + + boolean checkSystemLimit = false; + + LOG.info("max=" + max + " maxPartitionsPermitted=" + this.maxPartitionsPermitted); + + // If max is 0 then apply maxPartitionsPermitted; otherwise apply max. + // TODO: Simplify this logic when max is a proper integer. + if (max != null && max >= 0) { + if (max == 0 && this.maxPartitionsPermitted != -1) { + // This is a system sanity limit. + checkSystemLimit = true; + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(this.maxPartitionsPermitted + 1); + } else if (max > 0) { + // This is a user specified limit. + sb.insert(0, "SELECT * FROM ("); + sb.append(") WHERE ROWNUM <= "); + sb.append(max); + } + } + + LOG.info("query=" + sb.toString() + " params=" + params); + + PreparedStatement ps = null; + + List parts = null; + + try { + ps = this.connection.prepareStatement(sb.toString()); + + for (int i = 0; i < params.size(); i++) { + ps.setString(i + 1, params.get(i).toString()); + } + + parts = getPartitions(ps, tbl.getDbName(), tbl.getTableName(), -1); + } finally { + close(ps); + } + + LOG.info("Retrieved partitions: " + parts.size()); + + if (checkSystemLimit && parts.size() > this.maxPartitionsPermitted) { + LOG.info("Partition limit - verifying if maxAllowed partitions will exceed, " + + "maxPartitionsPermitted:" + this.maxPartitionsPermitted + ", max:" + max); + // TODO: Fix MetaStoreDirectSql.MAXPARTLIMIT_MSGPREFIX. + String message = MAXPARTLIMIT_MSGPREFIX + "maxes out the partition limit (" + + this.maxPartitionsPermitted + "), specify a more restrictive partition predicate"; + LOG.error(message); + throw new MetaException(message); + } + + return parts; + } + + /** + * Returns true if there are unknown partitions, otherwise false. + * @param mtbl the table metadata. + * @param tbl the table. + * @param expr Kryo serialized representation of the filter expression. + * @param defaultPartName the default partition name. + * @param max the limit on the number of results to return. + * @param result a list of partition names matching the expression. + * @return true if there are unknown partitions, otherwise false. + */ + private boolean getPartitionNamesPrunedByExpr(OracleTable mtbl, Table tbl, byte[] expr, + String defaultPartName, short max, List result) throws SQLException, MetaException { + // See ObjectStore.getPartitionNamesPrunedByExprNoTxn + PreparedStatement ps = null; + ResultSet rs = null; + + try { + String sql = "SELECT NAME FROM V2_PARTITIONS WHERE TBL_ID = ? ORDER BY NAME ASC"; + // TODO: Limit should not be applied before filtering. 
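+ // Applying ROWNUM at this point would truncate the candidate names before
+ // expressionProxy.filterPartitionsByExpr() has filtered them, so matching partitions
+ // could be dropped; the limit push-down below is therefore left commented out.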
+ /* + if (max > 0) { + sql = "SELECT NAME FROM (" + sql + ") WHERE ROWNUM <= " + max; + } else if (max == 0 && this.maxPartitionsPermitted != -1) { + sql = "SELECT NAME FROM (" + sql + ") WHERE ROWNUM <= " + (this.maxPartitionsPermitted + 1); + } + */ + + ps = this.connection.prepareStatement(sql); + ps.setLong(1, mtbl.id); + + rs = ps.executeQuery(); + + while(rs.next()) { + result.add(rs.getString(1)); + } + } finally { + close(ps, rs); + } + + ArrayList cols = new ArrayList(); + ArrayList types = new ArrayList(); + + for (FieldSchema fs : tbl.getPartitionKeys()) { + cols.add(fs.getName()); + types.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType())); + } + + if (defaultPartName == null || defaultPartName.isEmpty()) { + defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + } + + return this.expressionProxy.filterPartitionsByExpr(cols, types, expr, defaultPartName, result); + } + + /** + * Delete all of the data for all partitions of the specified table. + * @param id the table id. + */ + private void deleteAllPartitions(long id) throws SQLException { + // SEE: MetaStoreDirectSql.dropAllPartitions + + // TODO: Delete from SKEWED_STRING_LIST via SKEWED_COL_VALUE_LOC_MAP. + + // NOTE: Delete from V2_TBL_COLS via cascading foreign key. + + // NOTE: Delete from V2_SD_PARAMS and V2_SERDE_PARAMS via cascading foreign key. + } + + private void dropPartitions(List mparts) throws SQLException { + // TODO: Implement batch execution. + + if (mparts.size() > 0) { + PreparedStatement ps = null; + + try { + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (OraclePartition mpart : mparts) { + sb.append(comma + mpart.id); + comma = ","; + } + + // Delete partition grants. + dropPartitionPrivileges(mparts); + + // Delete partition column stats. + dropPartitionColumnPrivileges(mparts); + + ps = this.connection.prepareStatement("DELETE FROM V2_PARTITIONS WHERE PART_ID IN (" + sb.toString() + ")"); + + int rc = ps.executeUpdate(); + + LOG.info("Deleted rows: " + rc); + + // TODO: Delete unused columns. + //preDropStorageDescriptor(part.getSd()); + } finally { + close(ps); + } + } + } + + /** + * Add a new storage descriptor. + * @param sd the storage descriptor. + * @return the id of the newly created storage descriptor. 
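+ * If an equivalent descriptor already exists (detected through the in-memory cache or a
+ * constraint violation on insert), the id of the existing row is returned instead.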
+ */ + private long createStorageDescriptor(StorageDescriptor sd) throws SQLException { +/* +61 private int numBuckets; // required +63 private List bucketCols; // required +64 private List sortCols; // required +*/ + StorageDescriptor copy = sd.deepCopy(); + copy.setCols(null); + copy.setLocation(null); + copy.setNumBuckets(-1); + copy.setBucketCols(new ArrayList(0)); + copy.setSortCols(new ArrayList(0)); + copy.setParameters(null); + copy.setSkewedInfo(null); + copy.getSerdeInfo().setParameters(null); + + if (this.storageDescriptors != null && this.storageDescriptors.containsKey(copy)) { + LOG.info("Storage descriptor cache hit"); + return this.storageDescriptors.get(copy); + } else { + LOG.info("Storage descriptor cache miss"); + LOG.info(sd + " -> " + copy); + } + + long id = 0L; + + PreparedStatement ps = null; + + StringBuilder sb = new StringBuilder(); + sb.append(sd.getInputFormat()); + sb.append(" "); + sb.append(sd.getOutputFormat()); + sb.append(" "); + if (sd.getSerdeInfo().getName() != null) { + sb.append(sd.getSerdeInfo().getName()); + } + sb.append(" "); + if (sd.getSerdeInfo().getSerializationLib() != null) { + sb.append(sd.getSerdeInfo().getSerializationLib()); + } + + try { + id = getNextStorageDescriptorId(); + + ps = this.connection.prepareStatement("INSERT INTO V2_SDS (SD_ID, HASHCODE, IS_COMPRESSED, IS_STOREDASSUBDIRECTORIES, INPUT_FORMAT, OUTPUT_FORMAT, SERDE_NAME, SERDE_LIB) VALUES (?, ORA_HASH(?), ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, sb.toString()); + ps.setBoolean(3, false); + ps.setBoolean(4, false); + ps.setString(5, sd.getInputFormat()); + ps.setString(6, sd.getOutputFormat()); + ps.setString(7, sd.getSerdeInfo().getName()); + ps.setString(8, sd.getSerdeInfo().getSerializationLib()); + + LOG.info(ps.toString()); + + int rc = ps.executeUpdate(); + + if (this.storageDescriptors != null) { + this.storageDescriptors.put(copy, id); + } + + LOG.info("Added rows: " + rc); + } catch (SQLIntegrityConstraintViolationException e) { + id = getStorageDescriptorId(sb.toString()); + + if (this.storageDescriptors != null) { + this.storageDescriptors.put(copy, id); + } + } finally { + close(ps); + } + + return id; + } + + /** + * Returns the specified StorageDescriptor with basic fields set only. + * @param id the storage descriptor id. + * @return the specfied StorageDescriptor. + */ + private StorageDescriptor getStorageDescriptor(long id) throws SQLException { +/* +61 private int numBuckets; // required +63 private List bucketCols; // required +64 private List sortCols; // required +*/ + + StorageDescriptor sd = null; + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + ps = this.connection.prepareStatement("SELECT IS_COMPRESSED, IS_STOREDASSUBDIRECTORIES, INPUT_FORMAT, OUTPUT_FORMAT, SERDE_NAME, SERDE_LIB FROM V2_SDS WHERE SD_ID = ?"); + ps.setLong(1, id); + + rs = ps.executeQuery(); + + if (rs.next()) { + sd = new StorageDescriptor(); + sd.setCompressed(rs.getBoolean(1)); + sd.setStoredAsSubDirectories(rs.getBoolean(2)); + sd.setInputFormat(rs.getString(3)); + sd.setOutputFormat(rs.getString(4)); + sd.setSerdeInfo(new SerDeInfo(rs.getString(5), rs.getString(6), null)); + sd.setSkewedInfo(new SkewedInfo(new ArrayList(0), new ArrayList>(0), new HashMap, String>(0))); + // TODO: Support these + sd.setNumBuckets(-1); + sd.setBucketCols(new ArrayList(0)); + sd.setSortCols(new ArrayList(0)); + } + } finally { + close(ps, rs); + } + + return sd; + } + + /** + * Returns the specified StorageDescriptor with all fields set. 
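+ * The base descriptor is loaded by id and then columns, storage descriptor parameters,
+ * and serde parameters are attached from their respective tables.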
+ * @param id the storage descriptor id. + * @param cdId the column descriptor id. + * @param sdParamId the storage descriptor parameters id. + * @param serdeParamId the serde parameters id. + * @return the specified StorageDescriptor. + */ + private StorageDescriptor getStorageDescriptor(long id, long cdId, long sdParamId, long serdeParamId) throws SQLException { + StorageDescriptor sd = getStorageDescriptor(id); + sd.setCols(getTableColumns(cdId)); + + // Fetch storage descriptor params and serde params. + if (sdParamId > 0L) { + sd.setParameters(getStorageDescriptorParams(sdParamId)); + } else { + sd.setParameters(EMPTY_PARAMETERS); + } + + if (serdeParamId > 0L) { + sd.getSerdeInfo().setParameters(getSerdeParams(serdeParamId)); + } else { + sd.getSerdeInfo().setParameters(EMPTY_PARAMETERS); + } + + // TODO: Set skewed info correctly. + sd.setSkewedInfo(new SkewedInfo(new ArrayList(0), new ArrayList>(0), new HashMap, String>(0))); + + return sd; + } + + private long getStorageDescriptorId(String hashcode) throws SQLException { + StorageDescriptor sd = null; + + PreparedStatement ps = null; + ResultSet rs = null; + + long id = 0; + + try { + ps = this.connection.prepareStatement("SELECT SD_ID FROM V2_SDS WHERE HASHCODE = ORA_HASH(?)"); + ps.setString(1, hashcode); + + rs = ps.executeQuery(); + + if (rs.next()) { + id = rs.getLong(1); + } + } finally { + close(ps, rs); + } + + return id; + } + + private Map getStorageDescriptorMap() throws SQLException { + HashMap map = new HashMap(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + open(); + + ps = this.connection.prepareStatement("SELECT SD_ID, IS_COMPRESSED, IS_STOREDASSUBDIRECTORIES, INPUT_FORMAT, OUTPUT_FORMAT, SERDE_NAME, SERDE_LIB FROM V2_SDS"); + + rs = ps.executeQuery(); + + while (rs.next()) { + StorageDescriptor sd = new StorageDescriptor(); + sd.setCompressed(rs.getBoolean(2)); + sd.setStoredAsSubDirectories(rs.getBoolean(3)); + sd.setInputFormat(rs.getString(4)); + sd.setOutputFormat(rs.getString(5)); + sd.setSerdeInfo(new SerDeInfo(rs.getString(6), rs.getString(7), null)); + sd.setSkewedInfo(new SkewedInfo(new ArrayList(0), new ArrayList>(0), new HashMap, String>(0))); + // TODO: Support these + sd.setNumBuckets(-1); + sd.setBucketCols(new ArrayList(0)); + sd.setSortCols(new ArrayList(0)); + + map.put(sd, rs.getLong(1)); + } + } finally { + close(ps, rs); + close(); + } + + return map; + } + + /** + * Checks for the existence of the named role. + * @param name the name of the role. + * @return true if the role exists; otherwise false. 
+ */ + private boolean roleExists(String name) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + + try { + ps = this.connection.prepareStatement("SELECT COUNT(1) FROM V2_ROLES WHERE NAME = ?"); + ps.setString(1, name); + + rs = ps.executeQuery(); + + int count = -1; + + if (rs.next()) { + count = rs.getInt(1); + } + + return count == 1; + } finally { + close(ps, rs); + } + } + + private List getPrincipalGrants(PreparedStatement ps, String principal, + PrincipalType type, HiveObjectRef ref) throws SQLException { + ResultSet rs = null; + + ArrayList privileges = new ArrayList(); + + try { + rs = ps.executeQuery(); + + while (rs.next()) { + PrivilegeGrantInfo info = new PrivilegeGrantInfo(rs.getString(4), rs.getInt(5), rs.getString(1), PrincipalType.valueOf(rs.getString(2)), rs.getBoolean(3)); + + HiveObjectPrivilege privilege = new HiveObjectPrivilege(); + privilege.setHiveObject(ref); + privilege.setPrincipalName(principal); + privilege.setPrincipalType(type); + privilege.setGrantInfo(info); + + privileges.add(privilege); + } + } finally { + close(rs); + } + + return privileges; + } + + private void addGlobalPrivilege(String principal, String type, String privilege, String grantor, + String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_GLOBAL_PRIVILEGES (PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?)"); + ps.setString(1, principal); + ps.setString(2, type); + ps.setString(3, grantor); + ps.setString(4, grantorType); + ps.setBoolean(5, grantOption); + ps.setString(6, privilege); + ps.setInt(7, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void addDbPrivilege(long id, String principal, String type, String privilege, String grantor, + String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_DB_PRIVILEGES (DB_ID, PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, principal); + ps.setString(3, type); + ps.setString(4, grantor); + ps.setString(5, grantorType); + ps.setBoolean(6, grantOption); + ps.setString(7, privilege); + ps.setInt(8, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void addTablePrivilege(long id, String principal, String type, String privilege, String grantor, + String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_TBL_PRIVILEGES (TBL_ID, PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, principal); + ps.setString(3, type); + ps.setString(4, grantor); + ps.setString(5, grantorType); + ps.setBoolean(6, grantOption); + ps.setString(7, privilege); + ps.setInt(8, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void addPartitionPrivilege(long id, String principal, String type, String privilege, String grantor, + String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_PART_PRIVILEGES 
(PART_ID, PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, principal); + ps.setString(3, type); + ps.setString(4, grantor); + ps.setString(5, grantorType); + ps.setBoolean(6, grantOption); + ps.setString(7, privilege); + ps.setInt(8, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void dropPartitionPrivileges(List mparts) throws SQLException { + PreparedStatement ps = null; + + try { + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (OraclePartition mpart : mparts) { + sb.append(comma + mpart.id); + comma = ","; + } + + ps = this.connection.prepareStatement("DELETE FROM V2_PART_PRIVILEGES WHERE PART_ID IN (" + sb.toString() + ")"); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void addTableColumnPrivilege(long id, String column, String principal, String type, String privilege, + String grantor, String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_TBL_COL_PRIVILEGES (PART_ID, COLUMN_NAME, PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, column); + ps.setString(3, principal); + ps.setString(4, type); + ps.setString(5, grantor); + ps.setString(6, grantorType); + ps.setBoolean(7, grantOption); + ps.setString(8, privilege); + ps.setInt(9, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void addPartitionColumnPrivilege(long id, String column, String principal, String type, String privilege, + String grantor, String grantorType, boolean grantOption, int now) throws SQLException { + PreparedStatement ps = null; + + try { + ps = this.connection.prepareStatement("INSERT INTO V2_PART_COL_PRIVILEGES (PART_ID, COLUMN_NAME, PRINCIPAL_NAME, PRINCIPAL_TYPE, GRANTOR, GRANTOR_TYPE, GRANT_OPTION, PRIVILEGE, CREATION_TIME) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"); + ps.setLong(1, id); + ps.setString(2, column); + ps.setString(3, principal); + ps.setString(4, type); + ps.setString(5, grantor); + ps.setString(6, grantorType); + ps.setBoolean(7, grantOption); + ps.setString(8, privilege); + ps.setInt(9, now); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + private void dropPartitionColumnPrivileges(List mparts) throws SQLException { + PreparedStatement ps = null; + + try { + StringBuilder sb = new StringBuilder(); + String comma = ""; + + for (OraclePartition mpart : mparts) { + sb.append(comma + mpart.id); + comma = ","; + } + + ps = this.connection.prepareStatement("DELETE FROM V2_PART_COL_PRIVILEGES WHERE PART_ID IN (" + sb.toString() + ")"); + + int rc = ps.executeUpdate(); + } finally { + close(ps); + } + } + + /** + * Return a list of ResourceUris for the specified function. + * @param id the function id. + * @return a list of ResourceUris for the specified function. + */ + private List getFunctionResourceUris(long id) throws SQLException { + ArrayList uris = new ArrayList(); + + PreparedStatement ps = null; + ResultSet rs = null; + + try { + ps = this.connection.prepareStatement("SELECT POSITION, TYPE, URI FROM V2_FUNC_RESOURCES WHERE FUNC_ID = ? 
ORDER BY POSITION ASC"); + ps.setLong(1, id); + + rs = ps.executeQuery(); + + while (rs.next()) { + ResourceUri uri = new ResourceUri(); + uri.setResourceType(ResourceType.findByValue(rs.getInt(2))); + uri.setUri(rs.getString(3)); + + uris.add(uri); + } + } finally { + close(ps, rs); + } + + return uris; + } + + private String generatePartitionPsCondition(List partcols, List partvals) throws MetaException { + List cols = partcols; + + if (partvals.size() < partcols.size()) { + cols = partcols.subList(0, partvals.size()); + } + + String cond = Warehouse.makePartName(cols, partvals, "%"); + + if (partvals.size() < partcols.size()) { + cond = cond + "%"; + } + + return cond; + } + + /* DataDiscovery */ + // TODO: Better encapsulation of data discovery methods. + private List initializeDiscoveryQueries() { + ArrayList queries = new ArrayList(); + + String[] exprs = new String[] { " AND ", " OR " }; + + for (String expr : exprs) { + + // Search in table name + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBLS " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) WHERE ", + " V2_TBLS.NAME ", expr, TableSearchResult.TABLE_NAME_MATCH)); + + // Search in table comments + queries.add(new TableSearchSQL(" SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBL_PART_PARAMS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.TBL_PART_PARAM_ID = V2_TBL_PART_PARAMS.TBL_PART_PARAM_ID) " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) " + + " WHERE V2_TBL_PART_PARAMS.NAME = 'comment' AND " + + " V2_TBL_PART_PARAMS.VALUE IS NOT NULL AND ", + " V2_TBL_PART_PARAMS.VALUE ", expr, TableSearchResult.TABLE_COMMENTS_MATCH)); + + // Search in partition key names + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_PARTITION_COLS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.TBL_ID = V2_PARTITION_COLS.TBL_ID) " + + " INNER JOIN V2_DBS ON (V2_DBS.DB_ID = V2_TBLS.DB_ID) WHERE ", + " V2_PARTITION_COLS.NAME ", expr, TableSearchResult.PARTITION_NAME_MATCH)); + + // Search in partition key comments + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_PARTITION_COLS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.TBL_ID = V2_PARTITION_COLS.TBL_ID)" + + " INNER JOIN V2_DBS ON (V2_DBS.DB_ID = V2_TBLS.DB_ID) " + + " WHERE V2_PARTITION_COLS.\"COMMENT\" IS NOT NULL AND ", + " V2_PARTITION_COLS.\"COMMENT\" ", expr, TableSearchResult.PARTITION_COMMENTS_MATCH)); + + // Search in column names + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBL_COLS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.CD_ID = V2_TBL_COLS.CD_ID) " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) WHERE ", + " V2_TBL_COLS.NAME ", expr, TableSearchResult.COLUMN_NAME_MATCH)); + + // Search in column comments + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBL_COLS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.SD_ID = V2_TBL_COLS.CD_ID) " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) WHERE ", + " V2_TBL_COLS.\"COMMENT\" ", expr, TableSearchResult.COLUMN_COMMENTS_MATCH)); + + // Search in table owner + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBLS " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) WHERE ", + " V2_TBLS.OWNER_NAME ", expr, TableSearchResult.OWNER_MATCH)); + + // Search in table location + queries.add(new TableSearchSQL("SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBLS " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) WHERE ", + " V2_TBLS.LOCATION ", expr, TableSearchResult.LOCATION_MATCH)); + + 
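+ // The property searches below reuse V2_TBL_PART_PARAMS but exclude the reserved
+ // 'comment' key, which is already covered by the table-comment query above.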
// Search in table properties - keys + queries.add(new TableSearchSQL(" SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBL_PART_PARAMS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.TBL_PART_PARAM_ID = V2_TBL_PART_PARAMS.TBL_PART_PARAM_ID) " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) " + + " WHERE V2_TBL_PART_PARAMS.NAME <> 'comment' AND ", + " V2_TBL_PART_PARAMS.NAME ", expr, TableSearchResult.TABLE_PROPERTY_MATCH)); + + // Search in table properties - values + queries.add(new TableSearchSQL(" SELECT V2_DBS.NAME, V2_TBLS.NAME FROM V2_TBL_PART_PARAMS " + + " INNER JOIN V2_TBLS ON (V2_TBLS.TBL_PART_PARAM_ID = V2_TBL_PART_PARAMS.TBL_PART_PARAM_ID) " + + " INNER JOIN V2_DBS ON (V2_TBLS.DB_ID = V2_DBS.DB_ID) " + + " WHERE V2_TBL_PART_PARAMS.NAME <> 'comment' AND " + + " V2_TBL_PART_PARAMS.VALUE IS NOT NULL AND ", + " V2_TBL_PART_PARAMS.VALUE ", expr, TableSearchResult.TABLE_PROPERTY_MATCH)); + } + + return queries; + } + + // TODO: Better encapsulation of data discovery methods. + private void addTagsToTableProperty(TableSearchResult result, Table table) { + if (table.getParameters() != null) { + table.getParameters().put(TableSearchResult.HIVE_METASTORE_SEARCH_TAGS, + result.getTagsAsString()); + } else { + Map params = new HashMap(); + params.put(TableSearchResult.HIVE_METASTORE_SEARCH_TAGS, + result.getTagsAsString()); + table.setParameters(params); + } + } + + private List generatePartitionKeyValues(List pcols, List cols) { + ArrayList values = new ArrayList(cols.size()); + + for (FieldSchema col : cols) { + int index = 0; + String name = null; + + for (FieldSchema pcol : pcols) { + if (pcol.getName().equals(col.getName())) { + name = pcol.getName(); + break; + } + + index++; + } + + if (name != null) { + values.add(generatePartitionKeyValue(name, index, pcols.size())); + } + } + + return values; + } + + private String generatePartitionKeyValue(String name, int index, int size) { + // TODO: Integrate with PartitionFilterGenerator + String value = null; + + if (index == 0) { + if (size == 1) { + // The first and only partition key. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '" + name + "=', 1) + " + (name.length() + 1); + value = "SUBSTR(V2_PARTITIONS.NAME, " + pos1 + ")"; + } else { + // The first of many partition keys. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '" + name + "=', 1) + " + (name.length() + 1); + String pos2 = "INSTR(V2_PARTITIONS.NAME, '/', 1)"; + value = "SUBSTR(V2_PARTITIONS.NAME, " + pos1 + ", " + pos2 + " - (" + pos1 + "))"; + } + } else if (index < size - 1) { + // The next {2..n-1} partition keys. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '/" + name + "=', 1) + " + (name.length() + 2); + String pos2 = "INSTR(V2_PARTITIONS.NAME, '/', 1, " + (index + 1) + ")"; + value = "SUBSTR(V2_PARTITIONS.NAME, " + pos1 + ", " + pos2 + " - (" + pos1 + "))"; + } else { + // The last partition key. 
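+ // No trailing '/' follows the final key, so the value runs to the end of NAME.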
+ String pos1 = "INSTR(V2_PARTITIONS.NAME, '/" + name + "=', 1) + " + (name.length() + 2); + value = "SUBSTR(V2_PARTITIONS.NAME, " + pos1 + ")"; + } + + return value; + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStoreUtils.java new file mode 100644 index 0000000..5d9b461 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleStoreUtils.java @@ -0,0 +1,81 @@ +package org.apache.hadoop.hive.metastore.oracle; + +import org.antlr.runtime.CommonTokenStream; +import org.antlr.runtime.RecognitionException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.metastore.parser.FilterLexer; +import org.apache.hadoop.hive.metastore.parser.FilterParser; + +public class OracleStoreUtils { + private static final Log LOG = LogFactory.getLog(OracleStoreUtils.class); + + private static class LikeChecker extends ExpressionTree.TreeVisitor { + private boolean hasLike; + + public boolean hasLike() { + return hasLike; + } + + @Override + protected boolean shouldStop() { + return hasLike; + } + + @Override + protected void visit(LeafNode node) throws MetaException { + hasLike = hasLike || (node.operator == Operator.LIKE); + } + } + + private static FilterParser getFilterParser(String filter) throws MetaException { + FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter)); + CommonTokenStream tokens = new CommonTokenStream(lexer); + + FilterParser parser = new FilterParser(tokens); + try { + parser.filter(); + } catch(RecognitionException re) { + throw new MetaException("Error parsing partition filter; lexer error: " + + lexer.errorMsg + "; exception " + re); + } + + if (lexer.errorMsg != null) { + throw new MetaException("Error parsing partition filter : " + lexer.errorMsg); + } + + return parser; + } + + static ExpressionTree createExpressionTree(String filter) throws MetaException { + // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g. + if (filter == null || filter.isEmpty()) { + return ExpressionTree.EMPTY_TREE; + } + + LOG.info("Filter specified is " + filter); + + ExpressionTree tree = null; + try { + tree = getFilterParser(filter).tree; + } catch (MetaException ex) { + LOG.warn("Unable to make the expression tree from expression string [" + filter + "]" + ex.getMessage()); + } + + if (tree == null) { + return null; + } + + // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here. + LikeChecker lc = new LikeChecker(); + tree.accept(lc); + + return lc.hasLike() ? 
null : tree; + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleTable.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleTable.java new file mode 100644 index 0000000..c34853d --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/OracleTable.java @@ -0,0 +1,33 @@ +package org.apache.hadoop.hive.metastore.oracle; + +import org.apache.hadoop.hive.metastore.TableType; + +public class OracleTable { + public long id; + public long dbId; + public long sdId; + public long cdId; + public long paramsId; + public long sdParamsId; + public long serdeParamsId; + public String name; + public String type; + public String location; + + public OracleTable(long id, long dbId, long sdId, long cdId, String name, String type, String location) { + this.id = id; + this.dbId = dbId; + this.sdId = sdId; + this.cdId = cdId; + this.paramsId = 0L; + this.sdParamsId = 0L; + this.serdeParamsId = 0L; + this.name = name; + this.type = type; + this.location = location; + } + + public boolean isView() { + return TableType.VIRTUAL_VIEW.toString().equals(this.type); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/PartitionFilterGenerator.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/PartitionFilterGenerator.java new file mode 100644 index 0000000..05fcf18 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/PartitionFilterGenerator.java @@ -0,0 +1,221 @@ +package org.apache.hadoop.hive.metastore.oracle; + +import java.text.ParseException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.serde.serdeConstants; + +public class PartitionFilterGenerator extends TreeVisitor { + private static final Log LOG = LogFactory.getLog(PartitionFilterGenerator.class); + + private final Table table; + private final OracleTable mtbl; + private final FilterBuilder filterBuffer; + private final List params; + private final boolean dbHasJoinCastBug; + + private static enum FilterType { + Integral, + String, + Date, + Invalid; + + static FilterType fromType(String colTypeStr) { + if (colTypeStr.equals(serdeConstants.STRING_TYPE_NAME)) { + return FilterType.String; + } else if (colTypeStr.equals(serdeConstants.DATE_TYPE_NAME)) { + return FilterType.Date; + } else if (serdeConstants.IntegralTypes.contains(colTypeStr)) { + return FilterType.Integral; + } + return FilterType.Invalid; + } + + public static FilterType fromClass(Object value) { + if (value instanceof String) { + return FilterType.String; + } else if (value instanceof Long) { + return FilterType.Integral; + } else if (value instanceof java.sql.Date) { + return FilterType.Date; + } + return FilterType.Invalid; + } + } + + /** + * Generate the ANSI SQL92 filter for the given 
expression tree + * @param table the table being queried + * @param params the ordered parameters for the resulting expression + * @return the string representation of the expression tree + */ + public static String generateSqlFilter(OracleTable mtbl, Table table, ExpressionTree tree, + List params) throws MetaException { + assert table != null; + if (tree.getRoot() == null) { + return ""; + } + + PartitionFilterGenerator visitor = new PartitionFilterGenerator(table, mtbl, params); + tree.accept(visitor); + if (visitor.filterBuffer.hasError()) { + LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage()); + return null; + } + + return "(" + visitor.filterBuffer.getFilter() + ")"; + } + + private PartitionFilterGenerator(Table table, OracleTable mtbl, List params) { + this.table = table; + this.mtbl = mtbl; + this.params = params; + this.dbHasJoinCastBug = true; + this.filterBuffer = new FilterBuilder(false); + } + + @Override + protected void beginTreeNode(TreeNode node) throws MetaException { + filterBuffer.append(" ("); + } + + @Override + protected void midTreeNode(TreeNode node) throws MetaException { + filterBuffer.append((node.getAndOr() == LogicalOperator.AND) ? " AND " : " OR "); + } + + @Override + protected void endTreeNode(TreeNode node) throws MetaException { + filterBuffer.append(") "); + } + + @Override + protected boolean shouldStop() { + return filterBuffer.hasError(); + } + + /* + * Generate the following for partition key index {1..n-1}: + * SUBSTR(NAME, INSTR(NAME, 'ds=', 1) + 3, INSTR(NAME, '/', 1, m) - INSTR(NAME, 'ds=', 1) - 3) + * + * Generate the following for partition key index {n}: + * SUBSTR(NAME, INSTR(NAME, 'dr=', 1) + 3) + */ + @Override + public void visit(LeafNode node) throws MetaException { + if (node.operator == Operator.LIKE) { + filterBuffer.setError("LIKE is not supported for SQL filter pushdown"); + return; + } + + int partColCount = table.getPartitionKeys().size(); + int partColIndex = node.getPartColIndexForFilter(table, filterBuffer); + + if (filterBuffer.hasError()) { + return; + } + + // We skipped 'LIKE', other ops should all work as long as the types are right. + String fieldName = table.getPartitionKeys().get(partColIndex).getName(); + String colTypeStr = table.getPartitionKeys().get(partColIndex).getType(); + FilterType colType = FilterType.fromType(colTypeStr); + if (colType == FilterType.Invalid) { + filterBuffer.setError("Filter pushdown not supported for type " + colTypeStr); + return; + } + + FilterType valType = FilterType.fromClass(node.value); + Object nodeValue = node.value; + if (valType == FilterType.Invalid) { + filterBuffer.setError("Filter pushdown not supported for value " + node.value.getClass()); + return; + } + + // TODO: if Filter.g does date parsing for quoted strings, we'd need to verify there's no + // type mismatch when string col is filtered by a string that looks like date. + if (colType == FilterType.Date && valType == FilterType.String) { + // TODO: Filter.g cannot parse a quoted date; try to parse date here too. + try { + nodeValue = new java.sql.Date(HiveMetaStore.PARTITION_DATE_FORMAT.get().parse((String)nodeValue).getTime()); + valType = FilterType.Date; + } catch (ParseException e) { // do nothing, handled below - types will mismatch + } + } + + if (colType != valType) { + // It's not clear how filtering for e.g. "stringCol > 5" should work (which side is + // to be coerced?). Let the expression evaluation sort this one out, not metastore. 
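+ // Recording the error makes generateSqlFilter() return null, so no SQL filter is
+ // pushed down for this expression.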
+ filterBuffer.setError("Cannot push down filter for " + colTypeStr + " column and value " + nodeValue.getClass()); + return; + } + + // Build the filter and add parameters linearly; we are traversing leaf nodes LTR. + String tableValue = ""; + if (partColIndex == 0) { + if (partColCount == 1) { + // The first and only partition key. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '" + fieldName + "=', 1) + " + (fieldName.length() + 1); + tableValue = "SUBSTR(NAME, " + pos1 + ")"; + } else { + // The first of many partition key. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '" + fieldName + "=', 1) + " + (fieldName.length() + 1); + String pos2 = "INSTR(V2_PARTITIONS.NAME, '/', 1)"; + tableValue = "SUBSTR(NAME, " + pos1 + ", " + pos2 + " - (" + pos1 + "))"; + } + } else if (partColIndex < partColCount - 1) { + // The next {2..n-1} partition keys. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '/" + fieldName + "=', 1) + " + (fieldName.length() + 2); + String pos2 = "INSTR(V2_PARTITIONS.NAME, '/', 1, " + (partColIndex + 1) + ")"; + tableValue = "SUBSTR(NAME, " + pos1 + ", " + pos2 + " - (" + pos1 + "))"; + } else { + // The last partition key. + String pos1 = "INSTR(V2_PARTITIONS.NAME, '/" + fieldName + "=', 1) + " + (fieldName.length() + 2); + tableValue = "SUBSTR(NAME, " + pos1 + ")"; + } + + if (node.isReverseOrder) { + params.add(nodeValue); + } + + // TODO: Enable non-string partition keys + if (false && colType != FilterType.String) { + // The underlying database field is varchar, we need to compare numbers. + // Note that this won't work with __HIVE_DEFAULT_PARTITION__. It will fail and fall + // back to JDO. That is by design; we could add an ugly workaround here but didn't. + if (colType == FilterType.Integral) { + tableValue = "CAST(" + tableValue + " AS DECIMAL(21,0))"; + } else if (colType == FilterType.Date) { + tableValue = "CAST(" + tableValue + " AS DATE)"; + } + + /* + if (dbHasJoinCastBug) { + // This is a workaround for DERBY-6358 and Oracle bug; it is pretty horrible. + tableValue = "(CASE WHEN V2_PARTITIONS.TBL_ID = " + mtbl.id + " AND " + + filterName + ".PART_ID = V2_PARTITIONS.PART_ID AND " + + filterName + ".POSITION = " + partColIndex + " THEN " + + tableValue + " ELSE NULL END)"; + } + */ + } + + if (!node.isReverseOrder) { + params.add(nodeValue); + } + + filterBuffer.append(node.isReverseOrder ? "(? 
" + node.operator.getSqlOp() + " " + tableValue + ")" + : "(" + tableValue + " " + node.operator.getSqlOp() + " ?)"); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/oracle/TableFilterGenerator.java metastore/src/java/org/apache/hadoop/hive/metastore/oracle/TableFilterGenerator.java new file mode 100644 index 0000000..8eff89b --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/oracle/TableFilterGenerator.java @@ -0,0 +1,92 @@ +package org.apache.hadoop.hive.metastore.oracle; + +import java.text.ParseException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.serde.serdeConstants; + +public class TableFilterGenerator extends TreeVisitor { + private static final Log LOG = LogFactory.getLog(TableFilterGenerator.class); + + private final FilterBuilder filterBuffer; + private final List params; + private final boolean dbHasJoinCastBug; + + /** + * Generate the ANSI SQL92 filter for the given expression tree + * @param table the table being queried + * @param params the ordered parameters for the resulting expression + * @return the string representation of the expression tree + */ + public static String generateSqlFilter(ExpressionTree tree, List params) throws MetaException { + if (tree.getRoot() == null) { + return ""; + } + + TableFilterGenerator visitor = new TableFilterGenerator(params); + tree.accept(visitor); + if (visitor.filterBuffer.hasError()) { + LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage()); + return null; + } + + return "(" + visitor.filterBuffer.getFilter() + ")"; + } + + private TableFilterGenerator(List params) { + this.params = params; + this.dbHasJoinCastBug = true; + this.filterBuffer = new FilterBuilder(false); + } + + @Override + protected void beginTreeNode(TreeNode node) throws MetaException { + filterBuffer.append(" ("); + } + + @Override + protected void midTreeNode(TreeNode node) throws MetaException { + filterBuffer.append((node.getAndOr() == LogicalOperator.AND) ? " AND " : " OR "); + } + + @Override + protected void endTreeNode(TreeNode node) throws MetaException { + filterBuffer.append(") "); + } + + @Override + protected boolean shouldStop() { + return filterBuffer.hasError(); + } + + @Override + public void visit(LeafNode node) throws MetaException { + if (filterBuffer.hasError()) { + return; + } + + if (node.operator == Operator.LIKE) { + filterBuffer.setError("LIKE is not supported for SQL filter pushdown"); + return; + } + + Object nodeValue = node.value; + + params.add(nodeValue); + + filterBuffer.append(node.isReverseOrder ? "(? 
" + node.operator.getSqlOp() + " V2_TBLS.NAME)" + : "(V2_TBLS.NAME " + node.operator.getSqlOp() + " ?)"); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java index eaf5c0c..86daf55 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java @@ -226,7 +226,7 @@ private void authorize(Table table, Partition part, Privilege[] readRequiredPriv writeRequiredPriv = privExtractor.getWriteReqPriv(); // authorize drops if there was a drop privilege requirement - if(privExtractor.hasDropPrivilege()) { + if (privExtractor.hasDropPrivilege() && part.getLocation() != null) { checkDeletePermission(part.getDataLocation(), getConf(), authenticator.getUserName()); }