diff --git beeline/src/java/org/apache/hive/beeline/BeeLine.java beeline/src/java/org/apache/hive/beeline/BeeLine.java index 65eee2c2bb..82077cc3a9 100644 --- beeline/src/java/org/apache/hive/beeline/BeeLine.java +++ beeline/src/java/org/apache/hive/beeline/BeeLine.java @@ -515,7 +515,7 @@ String loc(String res, Object[] params) { protected String locElapsedTime(long milliseconds) { if (getOpts().getShowElapsedTime()) { - return loc("time-ms", new Object[] {new Double(milliseconds / 1000d)}); + return loc("time-ms", new Object[] {Double.valueOf(milliseconds / 1000d)}); } return ""; } @@ -1203,6 +1203,10 @@ private String getDefaultConnectionUrl(CommandLine cl) throws BeelineConfFilePar if (password != null) { jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_PASSWD, password); } + String auth = cl.getOptionValue("a"); + if (auth != null) { + jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_TYPE, auth); + } mergedConnectionProperties = HS2ConnectionFileUtils.mergeUserConnectionPropertiesAndBeelineSite( userConnectionProperties, jdbcConnectionParams); diff --git beeline/src/java/org/apache/hive/beeline/Rows.java beeline/src/java/org/apache/hive/beeline/Rows.java index e3b983ef41..ce2ede0960 100644 --- beeline/src/java/org/apache/hive/beeline/Rows.java +++ beeline/src/java/org/apache/hive/beeline/Rows.java @@ -58,6 +58,7 @@ this.convertBinaryArray = beeLine.getOpts().getConvertBinaryArrayToString(); } + @Override public void remove() { throw new UnsupportedOperationException(); } @@ -75,42 +76,38 @@ public void remove() { * is not reliable for all databases. */ boolean isPrimaryKey(int col) { - if (primaryKeys[col] != null) { - return primaryKeys[col].booleanValue(); - } - - try { - // this doesn't always work, since some JDBC drivers (e.g., - // Oracle's) return a blank string from getTableName. - String table = rsMeta.getTableName(col + 1); - String column = rsMeta.getColumnName(col + 1); - - if (table == null || table.length() == 0 || - column == null || column.length() == 0) { - return (primaryKeys[col] = new Boolean(false)).booleanValue(); - } - - ResultSet pks = beeLine.getDatabaseConnection().getDatabaseMetaData().getPrimaryKeys( - beeLine.getDatabaseConnection().getDatabaseMetaData().getConnection().getCatalog(), null, table); - + if (primaryKeys[col] == null) { try { - while (pks.next()) { - if (column.equalsIgnoreCase( - pks.getString("COLUMN_NAME"))) { - return (primaryKeys[col] = new Boolean(true)).booleanValue(); + // this doesn't always work, since some JDBC drivers (e.g., + // Oracle's) return a blank string from getTableName. 
+ String table = rsMeta.getTableName(col + 1); + String column = rsMeta.getColumnName(col + 1); + + if (table == null || table.isEmpty() || column == null || column.isEmpty()) { + primaryKeys[col] = Boolean.FALSE; + } else { + ResultSet pks = beeLine.getDatabaseConnection().getDatabaseMetaData().getPrimaryKeys( + beeLine.getDatabaseConnection().getDatabaseMetaData().getConnection().getCatalog(), null, table); + + primaryKeys[col] = Boolean.FALSE; + try { + while (pks.next()) { + if (column.equalsIgnoreCase(pks.getString("COLUMN_NAME"))) { + primaryKeys[col] = Boolean.TRUE; + break; + } + } + } finally { + pks.close(); } } - } finally { - pks.close(); + } catch (SQLException sqle) { + primaryKeys[col] = Boolean.FALSE; } - - return (primaryKeys[col] = new Boolean(false)).booleanValue(); - } catch (SQLException sqle) { - return (primaryKeys[col] = new Boolean(false)).booleanValue(); } + return primaryKeys[col].booleanValue(); } - class Row { final String[] values; final boolean isMeta; diff --git beeline/src/java/org/apache/hive/beeline/schematool/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/schematool/HiveSchemaTool.java index c7234760e7..223bda782b 100644 --- beeline/src/java/org/apache/hive/beeline/schematool/HiveSchemaTool.java +++ beeline/src/java/org/apache/hive/beeline/schematool/HiveSchemaTool.java @@ -25,10 +25,10 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser; -import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.schematool.MetastoreSchemaTool; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.NestedScriptParser; import org.apache.hive.beeline.BeeLine; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git beeline/src/test/org/apache/hive/beeline/schematool/TestHiveSchemaTool.java beeline/src/test/org/apache/hive/beeline/schematool/TestHiveSchemaTool.java index 8514dc8dc6..2311b0409f 100644 --- beeline/src/test/org/apache/hive/beeline/schematool/TestHiveSchemaTool.java +++ beeline/src/test/org/apache/hive/beeline/schematool/TestHiveSchemaTool.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index ae44b083be..0dea0996c9 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -3969,6 +3969,9 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal -1f, "The customized fraction of JVM memory which Tez will reserve for the processor"), TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED("hive.tez.cartesian-product.enabled", false, "Use Tez cartesian product edge 
to speed up cross product"), + TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB("hive.tez.unordered.output.buffer.size.mb", -1, + "When we have an operation that does not need a large buffer, we use this buffer size for simple custom edge.\n" + + "Value is an integer. Default value is -1, which means that we will estimate this value from operators in the plan."), // The default is different on the client and server, so it's null here. LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."), LLAP_IO_ROW_WRAPPER_ENABLED("hive.llap.io.row.wrapper.enabled", true, "Whether the LLAP IO row wrapper is enabled for non-vectorized queries."), diff --git common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java index 0ee41c0898..a9cb009191 100644 --- common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java +++ common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java @@ -134,7 +134,7 @@ public static void setPerfLogger(PerfLogger resetPerfLogger) { */ public void PerfLogBegin(String callerName, String method) { long startTime = System.currentTimeMillis(); - startTimes.put(method, new Long(startTime)); + startTimes.put(method, Long.valueOf(startTime)); if (LOG.isDebugEnabled()) { LOG.debug(""); } @@ -159,7 +159,7 @@ public long PerfLogEnd(String callerName, String method) { public long PerfLogEnd(String callerName, String method, String additionalInfo) { Long startTime = startTimes.get(method); long endTime = System.currentTimeMillis(); - endTimes.put(method, new Long(endTime)); + endTimes.put(method, Long.valueOf(endTime)); long duration = startTime == null ? -1 : endTime - startTime.longValue(); if (LOG.isDebugEnabled()) { diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java index 4d2f3d8211..1f05b892d9 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java @@ -22,6 +22,9 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -30,8 +33,6 @@ import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hive.hcatalog.common.HCatConstants; @@ -84,12 +85,13 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, } @Override - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context, - Hive hive, DDLWork work) throws HiveException { - CreateDatabaseDesc createDb = work.getCreateDatabaseDesc(); - if (createDb != null) { + protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork2 work) + throws HiveException { + DDLDesc ddlDesc = work.getDDLDesc(); + if (ddlDesc instanceof CreateDatabaseDesc) { + 
CreateDatabaseDesc createDb = (CreateDatabaseDesc)ddlDesc; Database db = new Database(createDb.getName(), createDb.getComment(), - createDb.getLocationUri(), createDb.getDatabaseProperties()); + createDb.getLocationUri(), createDb.getDatabaseProperties()); authorize(db, Privilege.CREATE); } } diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 8105e8ba54..fd159fe285 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -19,6 +19,12 @@ package org.apache.hive.hcatalog.cli.SemanticAnalysis; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; +import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -32,16 +38,12 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.ErrorType; @@ -273,37 +275,35 @@ private String extractTableName(String compoundName) { } @Override - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) - throws HiveException { - // DB opereations, none of them are enforced by Hive right now. - - ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc(); - if (showDatabases != null) { + protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork2 work) + throws HiveException { + DDLDesc ddlDesc = work.getDDLDesc(); + if (ddlDesc instanceof ShowDatabasesDesc) { authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(), - HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges()); - } - - DropDatabaseDesc dropDb = work.getDropDatabaseDesc(); - if (dropDb != null) { + HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges()); + } else if (ddlDesc instanceof DropDatabaseDesc) { + DropDatabaseDesc dropDb = (DropDatabaseDesc)ddlDesc; Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName()); if (db != null){ // if above returned a null, then the db does not exist - probably a // "drop database if exists" clause - don't try to authorize then. 
authorize(db, Privilege.DROP); } - } - - DescDatabaseDesc descDb = work.getDescDatabaseDesc(); - if (descDb != null) { + } else if (ddlDesc instanceof DescDatabaseDesc) { + DescDatabaseDesc descDb = (DescDatabaseDesc)ddlDesc; Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName()); authorize(db, Privilege.SELECT); - } - - SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc(); - if (switchDb != null) { + } else if (ddlDesc instanceof SwitchDatabaseDesc) { + SwitchDatabaseDesc switchDb = (SwitchDatabaseDesc)ddlDesc; Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName()); authorize(db, Privilege.SELECT); } + } + + @Override + protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) + throws HiveException { + // DB operations, none of them are enforced by Hive right now. ShowTablesDesc showTables = work.getShowTblsDesc(); if (showTables != null) { diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java index e359d53a60..e84dfdc931 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java @@ -23,6 +23,7 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -102,6 +103,11 @@ protected void authorizeDDL(HiveSemanticAnalyzerHookContext context, if (work != null) { authorizeDDLWork(context, hive, work); } + } else if (task.getWork() instanceof DDLWork2) { + DDLWork2 work = (DDLWork2) task.getWork(); + if (work != null) { + authorizeDDLWork2(context, hive, work); + } } } } catch (SemanticException ex) { @@ -122,6 +128,13 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context, Hive hive, DDLWork work) throws HiveException { } + /** + * Authorizes the given DDLWork2. It exists only for the interim period while DDLTask and DDLWork are being refactored. 
+ */ + protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext context, + Hive hive, DDLWork2 work) throws HiveException { + } + protected void authorize(Privilege[] inputPrivs, Privilege[] outputPrivs) throws AuthorizationException, SemanticException { try { diff --git hcatalog/webhcat/svr/pom.xml hcatalog/webhcat/svr/pom.xml index 4dfade5aea..75f1c703bc 100644 --- hcatalog/webhcat/svr/pom.xml +++ hcatalog/webhcat/svr/pom.xml @@ -92,21 +92,45 @@ com.sun.jersey jersey-core ${jersey.version} + + + javax.ws.rs + jsr311-api + + com.sun.jersey jersey-json ${jersey.version} + + + com.sun.jersey + jersey-server + + com.sun.jersey jersey-servlet ${jersey.version} + + + com.sun.jersey + jersey-server + + com.sun.jersey.contribs wadl-resourcedoc-doclet ${wadl-resourcedoc-doclet.version} + + + com.sun.jersey + jersey-server + + org.apache.commons @@ -143,6 +167,11 @@ jul-to-slf4j ${slf4j.version} + + org.apache.hive + hive-jdbc + ${project.version} + org.apache.hadoop hadoop-auth @@ -199,6 +228,11 @@ + + javax.ws.rs + javax.ws.rs-api + ${rs-api.version} + org.apache.hive diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java index 1fd9e47dde..b566cf8ffc 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java @@ -163,6 +163,7 @@ * of escape/unescape methods in {@link org.apache.hadoop.util.StringUtils} in webhcat. */ public static final String HIVE_PROPS_NAME = "templeton.hive.properties"; + public static final String HIVE_SERVER2_URL = "templeton.hive.hs2.url"; public static final String SQOOP_ARCHIVE_NAME = "templeton.sqoop.archive"; public static final String SQOOP_PATH_NAME = "templeton.sqoop.path"; public static final String SQOOP_HOME_PATH = "templeton.sqoop.home"; diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java index 3f1968d7f1..3f679accea 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java @@ -28,6 +28,7 @@ import org.apache.commons.exec.ExecuteException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.hcatalog.templeton.tool.JobSubmissionConstants; import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob; import org.apache.hive.hcatalog.templeton.tool.TempletonUtils; @@ -78,6 +79,11 @@ public EnqueueBean run(String user, Map userArgs, args.add("-p"); args.add("default"); + if (UserGroupInformation.isSecurityEnabled()) { + args.add("-a"); + args.add("delegationToken"); + } + //add mapreduce job tag placeholder args.add("--hiveconf"); args.add(TempletonControllerJob.HIVE_QUERY_TAG_ARG_PLACEHOLDER); diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java index c503a7acd1..bbe5947937 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java @@ -50,6 +50,7 @@ public class SecureProxySupport { private 
Path tokenPath; public static final String HCAT_SERVICE = "hcat"; + public static final String HIVE_SERVICE = "hive"; private final boolean isEnabled; private String user; diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java index 52738b760f..238ac48f32 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java @@ -233,7 +233,7 @@ public void setUser(String user) throws IOException { String jsonString = getField("userArgs"); - return (Map)JsonBuilder.jsonToMap(jsonString); + return JsonBuilder.jsonToMap(jsonString); } public void setUserArgs(Map userArgs) throws IOException @@ -295,7 +295,7 @@ public Long getLongField(String name) return null; else { try { - return new Long(s); + return Long.valueOf(s); } catch (NumberFormatException e) { LOG.error("templeton: bug " + name + " " + s + " : " + e); return null; diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java index b1f4a6ac5e..a776a0b3d8 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java @@ -18,6 +18,10 @@ */ package org.apache.hive.hcatalog.templeton.tool; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hive.hcatalog.templeton.SecureProxySupport; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -153,10 +157,10 @@ else if(TempletonUtils.isset(System.getenv(pathVarName))) { env.put(pathVarName, paths); } } - protected Process startJob(Configuration conf, String jobId, String user, String overrideClasspath, + protected Process startJob(Context context, String jobId, String user, String overrideClasspath, LauncherDelegator.JobType jobType) throws IOException, InterruptedException { - + Configuration conf = context.getConfiguration(); copyLocal(COPY_NAME, conf); String[] jarArgs = TempletonUtils.decodeArray(conf.get(JAR_ARGS_NAME)); @@ -174,6 +178,16 @@ protected Process startJob(Configuration conf, String jobId, String user, String handleTokenFile(jarArgsList, JobSubmissionConstants.TOKEN_FILE_ARG_PLACEHOLDER, "mapreduce.job.credentials.binary"); handleTokenFile(jarArgsList, JobSubmissionConstants.TOKEN_FILE_ARG_PLACEHOLDER_TEZ, "tez.credentials.path"); if (jobType == LauncherDelegator.JobType.HIVE) { + Credentials cred = new Credentials(); + Token token = context.getCredentials().getToken(new + Text(SecureProxySupport.HIVE_SERVICE)); + cred.addToken(new + Text(SecureProxySupport.HIVE_SERVICE), token); + File t = File.createTempFile("templeton", null); + Path tokenPath = new Path(t.toURI()); + cred.writeTokenStorageFile(tokenPath, conf); + env.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, + tokenPath.toUri().getPath()); replaceJobTag(jarArgsList, JobSubmissionConstants.HIVE_QUERY_TAG_ARG_PLACEHOLDER, JobSubmissionConstants.HIVE_QUERY_TAG, jobId); } else { @@ -405,7 +419,7 @@ public void run(Context context) throws IOException, InterruptedException { killLauncherChildJobs(conf, 
context.getJobID().toString()); // Start the job - Process proc = startJob(conf, + Process proc = startJob(context, context.getJobID().toString(), conf.get("user.name"), conf.get(OVERRIDE_CLASSPATH), diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java index bbb33ccf5f..834b54b6c8 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java @@ -20,8 +20,12 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.sql.DriverManager; +import java.sql.SQLException; import java.util.Arrays; +import org.apache.hive.hcatalog.templeton.LauncherDelegator; +import org.apache.hive.jdbc.HiveConnection; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -142,32 +146,43 @@ public int run(String[] args) throws IOException, InterruptedException, ClassNot Token mrdt = jc.getDelegationToken(new Text("mr token")); job.getCredentials().addToken(new Text("mr token"), mrdt); } - String metastoreTokenStrForm = addHMSToken(job, user); + LauncherDelegator.JobType jobType = LauncherDelegator.JobType.valueOf(conf.get(JOB_TYPE)); + + String tokenStrForm = null; + if (jobType == LauncherDelegator.JobType.HIVE) { + tokenStrForm = addToken(job, user, SecureProxySupport.HIVE_SERVICE); + } else { + tokenStrForm = addToken(job, user, SecureProxySupport.HCAT_SERVICE); + } job.submit(); JobID submittedJobId = job.getJobID(); - if(metastoreTokenStrForm != null) { + if(tokenStrForm != null) { //so that it can be cancelled later from CompleteDelegator DelegationTokenCache.getStringFormTokenCache().storeDelegationToken( - submittedJobId.toString(), metastoreTokenStrForm); - LOG.debug("Added metastore delegation token for jobId=" + submittedJobId.toString() + + submittedJobId.toString(), tokenStrForm); + LOG.debug("Added delegation token for jobId=" + submittedJobId.toString() + " user=" + user); } return 0; } - private String addHMSToken(Job job, String user) throws IOException, InterruptedException, + private String addToken(Job job, String user, String type) throws IOException, InterruptedException, TException { if(!secureMetastoreAccess) { return null; } Token hiveToken = new Token(); - String metastoreTokenStrForm = buildHcatDelegationToken(user); - hiveToken.decodeFromUrlString(metastoreTokenStrForm); - job.getCredentials().addToken(new - Text(SecureProxySupport.HCAT_SERVICE), hiveToken); - return metastoreTokenStrForm; + String tokenStrForm; + if (type.equals(SecureProxySupport.HIVE_SERVICE)) { + tokenStrForm = buildHS2DelegationToken(user); + } else { + tokenStrForm = buildHcatDelegationToken(user); + } + hiveToken.decodeFromUrlString(tokenStrForm); + job.getCredentials().addToken(new Text(type), hiveToken); + return tokenStrForm; } private String buildHcatDelegationToken(String user) throws IOException, InterruptedException, TException { @@ -189,4 +204,37 @@ public String run() throws IOException, TException, InterruptedException { } }); } + + private String buildHS2DelegationToken(String user) throws IOException, InterruptedException, + TException { + final HiveConf c = new HiveConf(); + LOG.debug("Creating hiveserver2 delegation token for user " + user); + final UserGroupInformation ugi = UgiFactory.getUgi(user); 
+ UserGroupInformation real = ugi.getRealUser(); + return real.doAs(new PrivilegedExceptionAction() { + @Override + public String run() throws IOException, TException, InterruptedException { + try { + Class.forName("org.apache.hive.jdbc.HiveDriver"); + } catch (ClassNotFoundException e) { + throw new IOException(e); + } + String hs2Url = appConf.get(AppConfig.HIVE_SERVER2_URL); + final HiveConnection con; + try { + con = (HiveConnection) DriverManager.getConnection(hs2Url); + } catch (SQLException e) { + throw new IOException(e); + } + String token = ugi.doAs(new PrivilegedExceptionAction() { + @Override + public String run() throws SQLException { + String u = ugi.getUserName(); + return con.getDelegationToken(u,u); + } + }); + return token; + } + }); + } } diff --git hplsql/src/main/java/org/apache/hive/hplsql/Cmp.java hplsql/src/main/java/org/apache/hive/hplsql/Cmp.java index 30d45871cf..3051f4b011 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/Cmp.java +++ hplsql/src/main/java/org/apache/hive/hplsql/Cmp.java @@ -121,6 +121,7 @@ Integer run(HplsqlParser.Cmp_stmtContext ctx) { /** * Get data for comparison from the source */ + @Override public void run() { exec.executeQuery(ctx, query, conn); } @@ -208,7 +209,7 @@ else if (query2.error()) { exec.signal(e); return null; } - return new Boolean(equal); + return Boolean.valueOf(equal); } /** diff --git hplsql/src/main/java/org/apache/hive/hplsql/Exec.java hplsql/src/main/java/org/apache/hive/hplsql/Exec.java index 47f5cef7d9..a027ef9432 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/Exec.java +++ hplsql/src/main/java/org/apache/hive/hplsql/Exec.java @@ -160,7 +160,7 @@ public Var setVariable(String name, String value) { } public Var setVariable(String name, int value) { - return setVariable(name, new Var(new Long(value))); + return setVariable(name, new Var(Long.valueOf(value))); } /** @@ -654,9 +654,9 @@ public void registerUdf() { void initOptions() { Iterator> i = exec.conf.iterator(); while (i.hasNext()) { - Entry item = (Entry)i.next(); - String key = (String)item.getKey(); - String value = (String)item.getValue(); + Entry item = i.next(); + String key = item.getKey(); + String value = item.getValue(); if (key == null || value == null || !key.startsWith("hplsql.")) { continue; } @@ -682,7 +682,7 @@ else if (key.startsWith("hplsql.")) { * Set SQLCODE */ public void setSqlCode(int sqlcode) { - Long code = new Long(sqlcode); + Long code = Long.valueOf(sqlcode); Var var = findVariable(SQLCODE); if (var != null) { var.setValue(code); @@ -720,7 +720,7 @@ public void setSqlState(String sqlstate) { public void setHostCode(int code) { Var var = findVariable(HOSTCODE); if (var != null) { - var.setValue(new Long(code)); + var.setValue(Long.valueOf(code)); } } @@ -2184,7 +2184,7 @@ public Integer visitSingle_quotedString(HplsqlParser.Single_quotedStringContext */ @Override public Integer visitInt_number(HplsqlParser.Int_numberContext ctx) { - exec.stack.push(new Var(new Long(ctx.getText()))); + exec.stack.push(new Var(Long.valueOf(ctx.getText()))); return 0; } @@ -2217,7 +2217,7 @@ public Integer visitBool_literal(HplsqlParser.Bool_literalContext ctx) { if (ctx.T_FALSE() != null) { val = false; } - stackPush(new Var(new Boolean(val))); + stackPush(new Var(Boolean.valueOf(val))); return 0; } diff --git hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java index eabb9fa7e0..07a9c98979 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java +++ 
hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java @@ -997,9 +997,9 @@ public Integer forRange(HplsqlParser.For_range_stmtContext ctx) { private Var setIndex(int start, int end, HplsqlParser.For_range_stmtContext ctx) { if (ctx.T_REVERSE() == null) - return new Var(ctx.L_ID().getText(), new Long(start)); + return new Var(ctx.L_ID().getText(), Long.valueOf(start)); else - return new Var(ctx.L_ID().getText(), new Long(end)); + return new Var(ctx.L_ID().getText(), Long.valueOf(end)); } /** @@ -1008,9 +1008,9 @@ private Var setIndex(int start, int end, HplsqlParser.For_range_stmtContext ctx) private void updateIndex(int step, Var index, HplsqlParser.For_range_stmtContext ctx) { if (ctx.T_REVERSE() == null) - index.increment(new Long(step)); + index.increment(step); else - index.decrement(new Long(step)); + index.decrement(step); } /** diff --git hplsql/src/main/java/org/apache/hive/hplsql/Udf.java hplsql/src/main/java/org/apache/hive/hplsql/Udf.java index eddf6d2fba..dde86d66ab 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/Udf.java +++ hplsql/src/main/java/org/apache/hive/hplsql/Udf.java @@ -103,7 +103,7 @@ void setParameters(DeferredObject[] arguments) throws HiveException { else if (argumentsOI[i] instanceof IntObjectInspector) { Integer value = (Integer)((IntObjectInspector)argumentsOI[i]).getPrimitiveJavaObject(arguments[i].get()); if (value != null) { - exec.setVariable(name, new Var(new Long(value))); + exec.setVariable(name, new Var(Long.valueOf(value))); } } else if (argumentsOI[i] instanceof LongObjectInspector) { diff --git hplsql/src/main/java/org/apache/hive/hplsql/Var.java hplsql/src/main/java/org/apache/hive/hplsql/Var.java index 06b0e9bb57..d1151e716c 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/Var.java +++ hplsql/src/main/java/org/apache/hive/hplsql/Var.java @@ -194,7 +194,7 @@ else if (val.type == Type.DOUBLE) { } else if (type == Type.DOUBLE) { if (val.type == Type.STRING) { - value = new Double((String)val.value); + value = Double.valueOf((String) val.value); } else if (val.type == Type.BIGINT || val.type == Type.DECIMAL) { value = Double.valueOf(val.doubleValue()); @@ -265,13 +265,13 @@ public Var setValue(ResultSet rs, ResultSetMetaData rsm, int idx) throws SQLExce } else if (type == java.sql.Types.INTEGER || type == java.sql.Types.BIGINT || type == java.sql.Types.SMALLINT || type == java.sql.Types.TINYINT) { - cast(new Var(new Long(rs.getLong(idx)))); + cast(new Var(Long.valueOf(rs.getLong(idx)))); } else if (type == java.sql.Types.DECIMAL || type == java.sql.Types.NUMERIC) { cast(new Var(rs.getBigDecimal(idx))); } else if (type == java.sql.Types.FLOAT || type == java.sql.Types.DOUBLE) { - cast(new Var(new Double(rs.getDouble(idx)))); + cast(new Var(Double.valueOf(rs.getDouble(idx)))); } return this; } @@ -411,7 +411,7 @@ else if (var.type == Type.DECIMAL) { } } else if (type == Type.STRING && var.type == Type.STRING && - ((String)value).equals((String)var.value)) { + ((String)value).equals(var.value)) { return true; } else if (type == Type.DECIMAL && var.type == Type.DECIMAL && @@ -475,9 +475,9 @@ public BigDecimal percentDiff(Var var) { /** * Increment an integer value */ - public Var increment(Long i) { + public Var increment(long i) { if (type == Type.BIGINT) { - value = new Long(((Long)value).longValue() + i); + value = Long.valueOf(((Long) value).longValue() + i); } return this; } @@ -485,12 +485,12 @@ public Var increment(Long i) { /** * Decrement an integer value */ - public Var decrement(Long i) { - if (type == Type.BIGINT) { - value = new 
Long(((Long)value).longValue() - i); - } - return this; - } + public Var decrement(long i) { + if (type == Type.BIGINT) { + value = Long.valueOf(((Long) value).longValue() - i); + } + return this; + } /** * Return an integer value diff --git hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java index 61bcdeca1c..279dd2048f 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java +++ hplsql/src/main/java/org/apache/hive/hplsql/functions/Function.java @@ -711,7 +711,7 @@ void evalInt(Long i) { } void evalInt(int i) { - evalInt(new Long(i)); + evalInt(Long.valueOf(i)); } /** @@ -744,7 +744,7 @@ Var evalPop(ParserRuleContext ctx, int value) { if (ctx != null) { return evalPop(ctx); } - return new Var(new Long(value)); + return new Var(Long.valueOf(value)); } /** diff --git hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java index 9c1037cb43..8b937e45f8 100644 --- hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java +++ hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java @@ -30,20 +30,20 @@ public FunctionString(Exec e) { */ @Override public void register(Function f) { - f.map.put("CONCAT", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { concat(ctx); }}); - f.map.put("CHAR", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { char_(ctx); }}); - f.map.put("INSTR", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { instr(ctx); }}); - f.map.put("LEN", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { len(ctx); }}); - f.map.put("LENGTH", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { length(ctx); }}); - f.map.put("LOWER", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { lower(ctx); }}); - f.map.put("REPLACE", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { replace(ctx); }}); - f.map.put("SUBSTR", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { substr(ctx); }}); - f.map.put("SUBSTRING", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { substr(ctx); }}); - f.map.put("TO_CHAR", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { toChar(ctx); }}); - f.map.put("UPPER", new FuncCommand() { public void run(HplsqlParser.Expr_func_paramsContext ctx) { upper(ctx); }}); + f.map.put("CONCAT", this::concat); + f.map.put("CHAR", this::char_); + f.map.put("INSTR", this::instr); + f.map.put("LEN", this::len); + f.map.put("LENGTH", this::length); + f.map.put("LOWER", this::lower); + f.map.put("REPLACE", this::replace); + f.map.put("SUBSTR", this::substr); + f.map.put("SUBSTRING", this::substr); + f.map.put("TO_CHAR", this::toChar); + f.map.put("UPPER", this::upper); - f.specMap.put("SUBSTRING", new FuncSpecCommand() { public void run(HplsqlParser.Expr_spec_funcContext ctx) { substring(ctx); }}); - f.specMap.put("TRIM", new FuncSpecCommand() { public void run(HplsqlParser.Expr_spec_funcContext ctx) { trim(ctx); }}); + f.specMap.put("SUBSTRING", this::substring); + f.specMap.put("TRIM", this::trim); } /** @@ -96,7 +96,7 @@ void instr(HplsqlParser.Expr_func_paramsContext ctx) { return; } else if(str.isEmpty()) { - evalInt(new Long(0)); + evalInt(0); return; } String substr = 
evalPop(ctx.func_param(1).expr()).toString(); @@ -139,7 +139,7 @@ else if(str.isEmpty()) { } } } - evalInt(new Long(idx)); + evalInt(idx); } /** @@ -151,7 +151,7 @@ void len(HplsqlParser.Expr_func_paramsContext ctx) { return; } int len = evalPop(ctx.func_param(0).expr()).toString().trim().length(); - evalInt(new Long(len)); + evalInt(len); } /** @@ -163,7 +163,7 @@ void length(HplsqlParser.Expr_func_paramsContext ctx) { return; } int len = evalPop(ctx.func_param(0).expr()).toString().length(); - evalInt(new Long(len)); + evalInt(len); } /** diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java similarity index 99% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java rename to itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java index 43c0b96193..9669cd4264 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.text.StrTokenizer; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index 69d2648232..3d05db2949 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -944,7 +944,7 @@ public void testIncrementalDumpEmptyDumpDirectory() throws Throwable { CommandProcessorResponse response = replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation + "'"); assertTrue(response.getErrorMessage().toLowerCase() - .contains("org.apache.hadoop.hive.ql.exec.DDLTask. Database does not exist: someJunkDB" + .contains("org.apache.hadoop.hive.ql.ddl.DDLTask2. Database does not exist: someJunkDB" .toLowerCase())); // Bootstrap load from an empty dump directory should return empty load directory error. 
diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java index 0132a0b077..e0b93f36ba 100644 --- itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hive/beeline/schematool/TestSchemaTool.java @@ -25,9 +25,9 @@ import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.PostgresCommandParser; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.NestedScriptParser; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.PostgresCommandParser; import org.junit.Assert; import org.junit.Test; diff --git jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java deleted file mode 100644 index 194fad81d5..0000000000 --- jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hive.storage.jdbc; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.TableScanDesc; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hive.storage.jdbc.conf.JdbcStorageConfig; - -import java.beans.XMLDecoder; -import java.io.ByteArrayInputStream; -import java.util.HashMap; -import java.util.Map; - -/** - * Translates the hive query condition into a condition that can be run on the underlying database - */ -public class QueryConditionBuilder { - - private static final Logger LOGGER = LoggerFactory.getLogger(QueryConditionBuilder.class); - private static final String EMPTY_STRING = ""; - private static QueryConditionBuilder instance = null; - - - public static QueryConditionBuilder getInstance() { - if (instance == null) { - instance = new QueryConditionBuilder(); - } - - return instance; - } - - - private QueryConditionBuilder() { - - } - - - public String buildCondition(Configuration conf) { - if (conf == null) { - return EMPTY_STRING; - } - - String filterXml = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR); - String hiveColumns = conf.get(serdeConstants.LIST_COLUMNS); - String columnMapping = conf.get(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName()); - - if ((filterXml == null) || ((columnMapping == null) && (hiveColumns == null))) { - return EMPTY_STRING; - } - - if (hiveColumns == null) { - hiveColumns = ""; - } - - Map columnMap = buildColumnMapping(columnMapping, hiveColumns); - String condition = createConditionString(filterXml, columnMap); - return condition; - } - - - /* - * Build a Hive-to-X column mapping, - * - */ - private Map buildColumnMapping(String columnMapping, String hiveColumns) { - if ((columnMapping == null) || (columnMapping.trim().isEmpty())) { - return createIdentityMap(hiveColumns); - } - - Map columnMap = new HashMap(); - String[] mappingPairs = columnMapping.toLowerCase().split(","); - for (String mapPair : mappingPairs) { - String[] columns = mapPair.split("="); - columnMap.put(columns[0].trim(), columns[1].trim()); - } - - return columnMap; - } - - - /* - * When no mapping is defined, it is assumed that the hive column names are equivalent to the column names in the - * underlying table - */ - private Map createIdentityMap(String hiveColumns) { - Map columnMap = new HashMap(); - String[] columns = hiveColumns.toLowerCase().split(","); - - for (String col : columns) { - columnMap.put(col.trim(), col.trim()); - } - - return columnMap; - } - - - /* - * Walk to Hive AST and translate the hive column names to their equivalent mappings. This is basically a cheat. 
- * - */ - private String createConditionString(String filterXml, Map columnMap) { - if ((filterXml == null) || (filterXml.trim().isEmpty())) { - return EMPTY_STRING; - } - - try (XMLDecoder decoder = new XMLDecoder(new ByteArrayInputStream(filterXml.getBytes("UTF-8")))) { - Object object = decoder.readObject(); - if (!(object instanceof ExprNodeDesc)) { - LOGGER.error("Deserialized filter expression is not of the expected type"); - throw new RuntimeException("Deserialized filter expression is not of the expected type"); - } - - ExprNodeDesc conditionNode = (ExprNodeDesc) object; - walkTreeAndTranslateColumnNames(conditionNode, columnMap); - return conditionNode.getExprString(); - } - catch (Exception e) { - LOGGER.error("Error during condition build", e); - return EMPTY_STRING; - } - } - - - /* - * Translate column names by walking the AST - */ - private void walkTreeAndTranslateColumnNames(ExprNodeDesc node, Map columnMap) { - if (node == null) { - return; - } - - if (node instanceof ExprNodeColumnDesc) { - ExprNodeColumnDesc column = (ExprNodeColumnDesc) node; - String hiveColumnName = column.getColumn().toLowerCase(); - if (columnMap.containsKey(hiveColumnName)) { - String dbColumnName = columnMap.get(hiveColumnName); - String finalName = formatColumnName(dbColumnName); - column.setColumn(finalName); - } - } - else { - if (node.getChildren() != null) { - for (ExprNodeDesc childNode : node.getChildren()) { - walkTreeAndTranslateColumnNames(childNode, columnMap); - } - } - } - } - - - /** - * This is an ugly hack for handling date column types because Hive doesn't have a built-in type for dates - */ - private String formatColumnName(String dbColumnName) { - if (dbColumnName.contains(":")) { - String[] typeSplit = dbColumnName.split(":"); - - if (typeSplit[1].equalsIgnoreCase("date")) { - return "{d " + typeSplit[0] + "}"; - } - - return typeSplit[0]; - } - else { - return dbColumnName; - } - } -} diff --git jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java index 5679f1b6eb..2039144d56 100644 --- jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java +++ jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java @@ -24,8 +24,6 @@ import org.apache.hadoop.conf.Configuration; -import org.apache.hive.storage.jdbc.QueryConditionBuilder; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,10 +128,6 @@ public static String getOrigQueryToExecute(Configuration config) { if (tableName != null) { // We generate query as select * query = "select * from " + tableName; - String hiveFilterCondition = QueryConditionBuilder.getInstance().buildCondition(config); - if ((hiveFilterCondition != null) && (!hiveFilterCondition.trim().isEmpty())) { - query = query + " WHERE " + hiveFilterCondition; - } } else { query = config.get(Constants.JDBC_QUERY); } @@ -152,11 +146,6 @@ public static String getQueryToExecute(Configuration config) { String tableName = config.get(JdbcStorageConfig.TABLE.getPropertyName()); query = "select * from " + tableName; - String hiveFilterCondition = QueryConditionBuilder.getInstance().buildCondition(config); - if ((hiveFilterCondition != null) && (!hiveFilterCondition.trim().isEmpty())) { - query = query + " WHERE " + hiveFilterCondition; - } - return query; } diff --git jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestQueryConditionBuilder.java 
jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestQueryConditionBuilder.java deleted file mode 100644 index a59645d837..0000000000 --- jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestQueryConditionBuilder.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.storage.jdbc; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.ql.plan.TableScanDesc; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hive.storage.jdbc.conf.JdbcStorageConfig; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.Scanner; - -import static org.hamcrest.Matchers.equalToIgnoringWhiteSpace; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -public class TestQueryConditionBuilder { - - private static String condition1; - private static String condition2; - - - @BeforeClass - public static void setup() throws IOException { - condition1 = readFileContents("condition1.xml"); - condition2 = readFileContents("condition2.xml"); - } - - - private static String readFileContents(String name) throws IOException { - try (Scanner s = new Scanner(TestQueryConditionBuilder.class.getClassLoader().getResourceAsStream(name))) { - return s.useDelimiter("\\Z").next(); - } - } - - - @Test - public void testSimpleCondition_noTranslation() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition1); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("(visitor_id = 'x')"))); - } - - - @Test - public void testSimpleCondition_withTranslation() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition1); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - conf.set(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName(), - "visitor_id=vid, sentiment=sentiment, tracking_id=tracking_id"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("(vid = 'x')"))); - } - - - @Test - public void testSimpleCondition_withDateType() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition1); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - conf.set(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName(), - "visitor_id=vid:date, sentiment=sentiment, tracking_id=tracking_id"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("({d vid} = 'x')"))); - } - 
- - @Test - public void testSimpleCondition_withVariedCaseMappings() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition1); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_ID,sentiment,tracking_id"); - conf.set(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName(), - "visitor_id=VID:date, sentiment=sentiment, tracking_id=tracking_id"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("({d vid} = 'x')"))); - } - - - @Test - public void testMultipleConditions_noTranslation() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition2); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("((visitor_id = 'x') and (sentiment = 'y'))"))); - } - - - @Test - public void testMultipleConditions_withTranslation() { - Configuration conf = new Configuration(); - conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, condition2); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - conf.set(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName(), "visitor_id=v,sentiment=s,tracking_id=t"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition, is(equalToIgnoringWhiteSpace("((v = 'x') and (s = 'y'))"))); - } - - - @Test - public void testWithNullConf() { - String condition = QueryConditionBuilder.getInstance().buildCondition(null); - assertThat(condition, is(notNullValue())); - assertThat(condition.trim().isEmpty(), is(true)); - } - - - @Test - public void testWithUndefinedFilterExpr() { - Configuration conf = new Configuration(); - conf.set(serdeConstants.LIST_COLUMNS, "visitor_id,sentiment,tracking_id"); - conf.set(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName(), "visitor_id=v,sentiment=s,tracking_id=t"); - String condition = QueryConditionBuilder.getInstance().buildCondition(conf); - - assertThat(condition, is(notNullValue())); - assertThat(condition.trim().isEmpty(), is(true)); - } - -} diff --git jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java index 8d5aa70446..32a476173c 100644 --- jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java +++ jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java @@ -18,6 +18,12 @@ package org.apache.hive.jdbc; +import org.apache.hadoop.hive.metastore.security.DelegationTokenIdentifier; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hive.service.rpc.thrift.TSetClientInfoResp; import org.apache.hive.service.rpc.thrift.TSetClientInfoReq; @@ -74,6 +80,7 @@ import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; import java.io.BufferedReader; +import java.io.DataInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -759,7 +766,23 @@ private String getClientDelegationToken(Map jdbcConnConf) if (JdbcConnectionParams.AUTH_TOKEN.equalsIgnoreCase(jdbcConnConf.get(JdbcConnectionParams.AUTH_TYPE))) { // check delegation 
token in job conf if any try { - tokenStr = SessionUtils.getTokenStrForm(HiveAuthConstants.HS2_CLIENT_TOKEN); + if (System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION) != null) { + try { + Credentials cred = new Credentials(); + DataInputStream dis = new DataInputStream(new FileInputStream(System.getenv(UserGroupInformation + .HADOOP_TOKEN_FILE_LOCATION))); + cred.readTokenStorageStream(dis); + dis.close(); + Token token = cred.getToken(new Text("hive")); + tokenStr = token.encodeToUrlString(); + } catch (IOException e) { + LOG.warn("Cannot get token from environment variable $HADOOP_TOKEN_FILE_LOCATION=" + + System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION)); + } + } + if (tokenStr == null) { + tokenStr = SessionUtils.getTokenStrForm(HiveAuthConstants.HS2_CLIENT_TOKEN); + } } catch (IOException e) { throw new SQLException("Error reading token ", e); } @@ -850,6 +873,7 @@ private boolean isSslConnection() { private boolean isKerberosAuthMode() { return !JdbcConnectionParams.AUTH_SIMPLE.equals(sessConfMap.get(JdbcConnectionParams.AUTH_TYPE)) + && !JdbcConnectionParams.AUTH_TOKEN.equals(sessConfMap.get(JdbcConnectionParams.AUTH_TYPE)) && sessConfMap.containsKey(JdbcConnectionParams.AUTH_PRINCIPAL); } diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java index e012d7dbf9..62d7e55344 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java @@ -453,11 +453,13 @@ public Allocator getAllocator() { public void debugDumpShort(StringBuilder sb) { sb.append("\nORC cache state "); int allLocked = 0, allUnlocked = 0, allEvicted = 0, allMoving = 0; + long totalUsedSpace = 0; for (Map.Entry>> e : cache.entrySet()) { if (!e.getValue().incRef()) continue; try { int fileLocked = 0, fileUnlocked = 0, fileEvicted = 0, fileMoving = 0; + long fileMemoryUsage = 0; if (e.getValue().getCache().isEmpty()) continue; List lockedBufs = null; if (LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) { @@ -483,6 +485,7 @@ public void debugDumpShort(StringBuilder sb) { ++fileUnlocked; } } finally { + fileMemoryUsage += e2.getValue().allocSize; e2.getValue().decRef(); } } @@ -490,8 +493,21 @@ public void debugDumpShort(StringBuilder sb) { allUnlocked += fileUnlocked; allEvicted += fileEvicted; allMoving += fileMoving; - sb.append("\n file " + e.getKey() + ": " + fileLocked + " locked, " + fileUnlocked - + " unlocked, " + fileEvicted + " evicted, " + fileMoving + " being moved"); + totalUsedSpace += fileMemoryUsage; + + sb.append("\n file " + + e.getKey() + + ": " + + fileLocked + + " locked, " + + fileUnlocked + + " unlocked, " + + fileEvicted + + " evicted, " + + fileMoving + + " being moved," + + fileMemoryUsage + + " total used byte"); if (fileLocked > 0 && LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) { LlapIoImpl.LOCKING_LOGGER.trace("locked-buffers: {}", lockedBufs); } @@ -499,7 +515,16 @@ public void debugDumpShort(StringBuilder sb) { e.getValue().decRef(); } } - sb.append("\nORC cache summary: " + allLocked + " locked, " + allUnlocked + " unlocked, " - + allEvicted + " evicted, " + allMoving + " being moved"); + sb.append("\nORC cache summary: " + + allLocked + + " locked, " + + allUnlocked + + " unlocked, " + + allEvicted + + " evicted, " + + allMoving + + " being moved," + + totalUsedSpace + + "total used space"); } } diff --git 
llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java index 45829dd5c4..aa5ad66314 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java @@ -20,11 +20,59 @@ import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority; +/** + * Actor managing the eviction requests. + * The cache policy relies on notifications from the actual {@link LowLevelCache} to keep track of buffer access. + */ public interface LowLevelCachePolicy extends LlapIoDebugDump { + + /** + * Signals to the policy the addition of a new page to the cache directory. + * + * @param buffer buffer to be cached + * @param priority the priority of the cached element + */ void cache(LlapCacheableBuffer buffer, Priority priority); + + /** + * Notifies the policy that this buffer is locked, thus takes it out of the free list. + * Note that this notification is a hint and cannot be the source of truth about what can be evicted; + * currently the source of truth is the buffer's reference counter, see {@link LlapCacheableBuffer#isLocked()}. + * + * @param buffer buffer to be locked. + */ void notifyLock(LlapCacheableBuffer buffer); + + /** + * Notifies the policy that a buffer is unlocked after being used. This notification signals to the policy that an + * access to this page occurred and can thus be used to track which page got a read request. + * + * @param buffer buffer that just got unlocked + */ void notifyUnlock(LlapCacheableBuffer buffer); + + /** + * Signals to the policy that it has to evict some pages to make room for incoming buffers. + * The policy has to evict at least the amount requested. + * The policy does not know about the shape of evicted buffers and can only reason about total size. + * Note that this method will block until at least {@code memoryToReserve} bytes are evicted. + * + * @param memoryToReserve amount of bytes to be evicted + * @return actual amount of evicted bytes. + */ long evictSomeBlocks(long memoryToReserve); + + /** + * Sets the eviction listener dispatcher. + * + * @param listener eviction listener actor + */ void setEvictionListener(EvictionListener listener); + + /** + * Signals to the policy to evict all the unlocked used buffers. + * + * @return amount (bytes) of memory evicted.
+ */ long purge(); } diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java index e552fee534..704f2f14d3 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java @@ -181,7 +181,7 @@ public long purge() { try { LlapCacheableBuffer current = oldTail = listTail; while (current != null) { - boolean canEvict = LlapCacheableBuffer.INVALIDATE_OK != current.invalidate(); + boolean canEvict = LlapCacheableBuffer.INVALIDATE_OK == current.invalidate(); current.indexInHeap = LlapCacheableBuffer.NOT_IN_CACHE; if (canEvict) { current = current.prev; diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java index fedade5c9c..0d16703011 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java @@ -20,8 +20,41 @@ import java.util.concurrent.atomic.AtomicBoolean; +/** + * Memory Manager is an accountant over a fixed size of memory. + * It does the following: + * 1 - tracks the amount of memory (bytes) reserved out of a given maximum size to be shared between IO Threads. + * 2 - when a reservation cannot be fulfilled from the current free space, it has to notify the evictor to free up some + * space. + *

+ * Note that it does not know about the actual shape, content, or owners of the memory; all it cares about is byte usage. + */ public interface MemoryManager { + + /** + * Signals to the memory manager the release of some memory bytes that are free to be used. + * + * @param memUsage amount of memory bytes that are released to be added to the ledger as free. + */ void releaseMemory(long memUsage); + + /** + * Sets the amount of bytes that the memory manager is managing. + * + * @param maxSize total amount of available bytes to be allocated. + */ void updateMaxSize(long maxSize); + + /** + * Reserves some amount of bytes within the managed pool of memory. + *

+ * Callers expect that the memory manager will always fulfill the request by notifying the Evictor about how much + * need to be evicted to accommodate the reserve request. + * Note that this method will block until reservation is fulfilled. + * + * @param memoryToReserve Amount of bytes to reserve. + * @param isStopped Caller state to indicate if it is still running while the memory manager is trying to + * allocate the space. + */ void reserveMemory(long memoryToReserve, AtomicBoolean isStopped); } diff --git llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java index 2b3bca6ec1..8400fe9841 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java @@ -127,9 +127,22 @@ public void notifyEvicted(OrcFileEstimateErrors buffer) { @Override public void debugDumpShort(StringBuilder sb) { - // TODO: perhaps add counters for separate things and multiple buffer cases. - sb.append("\nMetadata cache state: ").append(metadata.size()).append( - " files and stripes, ").append(estimateErrors.size()).append(" files w/ORC estimate"); + sb.append("\nMetadata cache state: ") + .append(metadata.size()) + .append(" files and stripes, ") + .append(metadata.values().parallelStream().mapToLong(value -> { + if (value.getSingleLlapBuffer() != null) { + return value.getSingleLlapBuffer().allocSize; + } + long sum = 0; + for (LlapAllocatorBuffer llapMetadataBuffer : value.getMultipleLlapBuffers()) { + sum += llapMetadataBuffer.allocSize; + } + return sum; + }).sum()) + .append(" total used bytes, ") + .append(estimateErrors.size()) + .append(" files w/ORC estimate"); } @Override diff --git packaging/src/main/assembly/bin.xml packaging/src/main/assembly/bin.xml index fceb1be745..766161d66c 100644 --- packaging/src/main/assembly/bin.xml +++ packaging/src/main/assembly/bin.xml @@ -101,9 +101,12 @@ true org.apache.hadoop:* + org.apache.hive:hive-jdbc:jar:standalone + org.apache.httpcomponents:* org.apache.hive.hcatalog:hive-webhcat:* + org.apache.hive:hive-jdbc:jar diff --git pom.xml pom.xml index c2d8641497..23c441304a 100644 --- pom.xml +++ pom.xml @@ -217,6 +217,7 @@ 3.0.0 0.6.0 2.2.4 + 2.0.1 diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLDesc.java new file mode 100644 index 0000000000..66e5cb0fed --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLDesc.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl; + +/** + * Marker interface for all DDL operation descriptors. 
+ */ +public interface DDLDesc { +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java new file mode 100644 index 0000000000..e349a0ac2c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl; + +import java.io.DataOutputStream; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Abstract ancestor class of all DDL Operation classes. + */ +public abstract class DDLOperation { + protected static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask"); + + protected final DDLOperationContext context; + + public DDLOperation(DDLOperationContext context) { + this.context = context; + } + + public abstract int execute() throws HiveException; + + protected DataOutputStream getOutputStream(Path outputFile) throws HiveException { + try { + FileSystem fs = outputFile.getFileSystem(context.getConf()); + return fs.create(outputFile); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java new file mode 100644 index 0000000000..924f0b31cf --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; +import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; + +/** + * Context for DDL operations. + */ +public class DDLOperationContext { + private final Hive db; + private final HiveConf conf; + private final DriverContext driverContext; + private final MetaDataFormatter formatter; + + public DDLOperationContext(HiveConf conf, DriverContext driverContext) throws HiveException { + this.db = Hive.get(conf); + this.conf = conf; + this.driverContext = driverContext; + this.formatter = MetaDataFormatUtils.getFormatter(conf); + } + + public Hive getDb() { + return db; + } + + public HiveConf getConf() { + return conf; + } + + public DriverContext getDriverContext() { + return driverContext; + } + + public MetaDataFormatter getFormatter() { + return formatter; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java new file mode 100644 index 0000000000..068e1e7a96 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl; + +import java.io.Serializable; +import java.lang.reflect.Constructor; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hive.ql.CompilationOpContext; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; +import org.apache.hadoop.hive.ql.plan.api.StageType; + +/** + * DDLTask implementation. 
+**/ +public final class DDLTask2 extends Task implements Serializable { + private static final long serialVersionUID = 1L; + + private static final Map, Class> DESC_TO_OPARATION = + new HashMap<>(); + public static void registerOperation(Class descClass, + Class operationClass) { + DESC_TO_OPARATION.put(descClass, operationClass); + } + + @Override + public boolean requireLock() { + return this.work != null && this.work.getNeedLock(); + } + + @Override + public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext ctx, + CompilationOpContext opContext) { + super.initialize(queryState, queryPlan, ctx, opContext); + } + + @Override + public int execute(DriverContext driverContext) { + if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { + return 0; + } + + try { + DDLDesc ddlDesc = work.getDDLDesc(); + + if (DESC_TO_OPARATION.containsKey(ddlDesc.getClass())) { + DDLOperationContext context = new DDLOperationContext(conf, driverContext); + Class ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass()); + Constructor constructor = + ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass()); + DDLOperation ddlOperation = constructor.newInstance(context, ddlDesc); + return ddlOperation.execute(); + } else { + throw new IllegalArgumentException("Unknown DDL request: " + ddlDesc.getClass()); + } + } catch (Throwable e) { + failed(e); + return 1; + } + } + + private void failed(Throwable e) { + while (e.getCause() != null && e.getClass() == RuntimeException.class) { + e = e.getCause(); + } + setException(e); + LOG.error("Failed", e); + } + + @Override + public StageType getType() { + return StageType.DDL; + } + + @Override + public String getName() { + return "DDL"; + } + + /* + uses the authorizer from SessionState will need some more work to get this to run in parallel, + however this should not be a bottle neck so might not need to parallelize this. + */ + @Override + public boolean canExecuteInParallel() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java new file mode 100644 index 0000000000..d2fbe8fede --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl; + +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; + +import java.io.Serializable; + +import java.util.Set; + +/** + * A DDL operation. 
+ */ +public final class DDLWork2 implements Serializable { + private static final long serialVersionUID = 1L; + + private DDLDesc ddlDesc; + boolean needLock = false; + + /** ReadEntitites that are passed to the hooks. */ + protected Set inputs; + /** List of WriteEntities that are passed to the hooks. */ + protected Set outputs; + + public DDLWork2() { + } + + public DDLWork2(Set inputs, Set outputs) { + this.inputs = inputs; + this.outputs = outputs; + } + + public DDLWork2(Set inputs, Set outputs, DDLDesc ddlDesc) { + this(inputs, outputs); + this.ddlDesc = ddlDesc; + } + + public Set getInputs() { + return inputs; + } + + public Set getOutputs() { + return outputs; + } + + public boolean getNeedLock() { + return needLock; + } + + public void setNeedLock(boolean needLock) { + this.needLock = needLock; + } + + public DDLDesc getDDLDesc() { + return ddlDesc; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseDesc.java similarity index 59% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseDesc.java index 347ed97bd8..547b3515c0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseDesc.java @@ -16,80 +16,91 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.plan.PrincipalDesc; /** - * AlterDatabaseDesc. - * + * DDL task description for ALTER DATABASE commands. */ @Explain(displayName = "Alter Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterDatabaseDesc extends DDLDesc implements Serializable { - +public class AlterDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - // Only altering the database property and owner is currently supported - public static enum ALTER_DB_TYPES { - ALTER_PROPERTY, ALTER_OWNER, ALTER_LOCATION - }; - - ALTER_DB_TYPES alterType; - String databaseName; - Map dbProperties; - PrincipalDesc ownerPrincipal; - ReplicationSpec replicationSpec; - String location; + static { + DDLTask2.registerOperation(AlterDatabaseDesc.class, AlterDatabaseOperation.class); + } /** - * For serialization only. + * Supported type of alter db commands. 
+ * Only altering the database property and owner is currently supported */ - public AlterDatabaseDesc() { - } + public enum AlterDbType { + ALTER_PROPERTY, ALTER_OWNER, ALTER_LOCATION + }; - public AlterDatabaseDesc(String databaseName, Map dbProps, - ReplicationSpec replicationSpec) { - super(); + private final AlterDbType alterType; + private final String databaseName; + private final Map dbProperties; + private final ReplicationSpec replicationSpec; + private final PrincipalDesc ownerPrincipal; + private final String location; + + public AlterDatabaseDesc(String databaseName, Map dbProperties, ReplicationSpec replicationSpec) { + this.alterType = AlterDbType.ALTER_PROPERTY; this.databaseName = databaseName; + this.dbProperties = dbProperties; this.replicationSpec = replicationSpec; - this.setDatabaseProperties(dbProps); - this.setAlterType(ALTER_DB_TYPES.ALTER_PROPERTY); + this.ownerPrincipal = null; + this.location = null; } - public AlterDatabaseDesc(String databaseName, PrincipalDesc ownerPrincipal, - ReplicationSpec replicationSpec) { + public AlterDatabaseDesc(String databaseName, PrincipalDesc ownerPrincipal, ReplicationSpec replicationSpec) { + this.alterType = AlterDbType.ALTER_OWNER; this.databaseName = databaseName; + this.dbProperties = null; this.replicationSpec = replicationSpec; - this.setOwnerPrincipal(ownerPrincipal); - this.setAlterType(ALTER_DB_TYPES.ALTER_OWNER); + this.ownerPrincipal = ownerPrincipal; + this.location = null; } - public AlterDatabaseDesc(String databaseName, String newLocation) { + public AlterDatabaseDesc(String databaseName, String location) { + this.alterType = AlterDbType.ALTER_LOCATION; this.databaseName = databaseName; - this.setLocation(newLocation); - this.setAlterType(ALTER_DB_TYPES.ALTER_LOCATION); - } - - @Explain(displayName="properties") - public Map getDatabaseProperties() { - return dbProperties; + this.dbProperties = null; + this.replicationSpec = null; + this.ownerPrincipal = null; + this.location = location; } - public void setDatabaseProperties(Map dbProps) { - this.dbProperties = dbProps; + public AlterDbType getAlterType() { + return alterType; } - @Explain(displayName="name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="name", explainLevels = {Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDatabaseName() { return databaseName; } - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; + @Explain(displayName="properties") + public Map getDatabaseProperties() { + return dbProperties; + } + + /** + * @return what kind of replication scope this alter is running under. + * This can result in a "ALTER IF NEWER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec() { + return this.replicationSpec; } @Explain(displayName="owner") @@ -97,31 +108,8 @@ public PrincipalDesc getOwnerPrincipal() { return ownerPrincipal; } - public void setOwnerPrincipal(PrincipalDesc ownerPrincipal) { - this.ownerPrincipal = ownerPrincipal; - } - @Explain(displayName="location") public String getLocation() { return location; } - - public void setLocation(String location) { - this.location = location; - } - public ALTER_DB_TYPES getAlterType() { - return alterType; - } - - public void setAlterType(ALTER_DB_TYPES alterType) { - this.alterType = alterType; - } - - /** - * @return what kind of replication scope this alter is running under. 
- * This can result in a "ALTER IF NEWER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec() { - return this.replicationSpec; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseOperation.java new file mode 100644 index 0000000000..a3bc7b000f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseOperation.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of altering a database. + */ +public class AlterDatabaseOperation extends DDLOperation { + private final AlterDatabaseDesc desc; + + public AlterDatabaseOperation(DDLOperationContext context, AlterDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + String dbName = desc.getDatabaseName(); + Database database = context.getDb().getDatabase(dbName); + if (database == null) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + Map params = database.getParameters(); + if ((null != desc.getReplicationSpec()) && + !desc.getReplicationSpec().allowEventReplacementInto(params)) { + LOG.debug("DDLTask: Alter Database {} is skipped as database is newer than update", dbName); + return 0; // no replacement, the existing database state is newer than our update. + } + + switch (desc.getAlterType()) { + case ALTER_PROPERTY: + alterProperties(database, params); + break; + + case ALTER_OWNER: + alterOwner(database); + break; + + case ALTER_LOCATION: + alterLocation(database); + break; + + default: + throw new AssertionError("Unsupported alter database type! 
: " + desc.getAlterType()); + } + + context.getDb().alterDatabase(database.getName(), database); + return 0; + } + + private void alterProperties(Database database, Map params) { + Map newParams = desc.getDatabaseProperties(); + + // if both old and new params are not null, merge them + if (params != null && newParams != null) { + params.putAll(newParams); + database.setParameters(params); + } else { + // if one of them is null, replace the old params with the new one + database.setParameters(newParams); + } + } + + private void alterOwner(Database database) { + database.setOwnerName(desc.getOwnerPrincipal().getName()); + database.setOwnerType(desc.getOwnerPrincipal().getType()); + } + + private void alterLocation(Database database) throws HiveException { + try { + String newLocation = desc.getLocation(); + URI locationURI = new URI(newLocation); + if (!locationURI.isAbsolute() || StringUtils.isBlank(locationURI.getScheme())) { + throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation); + } + + if (newLocation.equals(database.getLocationUri())) { + LOG.info("AlterDatabase skipped. No change in location."); + } else { + database.setLocationUri(newLocation); + } + } catch (URISyntaxException e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseDesc.java similarity index 67% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseDesc.java index f2e6a77083..2ba8b186ed 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseDesc.java @@ -16,42 +16,40 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * CreateDatabaseDesc. - * + * DDL task description for CREATE DATABASE commands. */ @Explain(displayName = "Create Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateDatabaseDesc extends DDLDesc implements Serializable { - +public class CreateDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String databaseName; - String locationUri; - String comment; - boolean ifNotExists; - Map dbProperties; - - /** - * For serialization only. 
- */ - public CreateDatabaseDesc() { + static { + DDLTask2.registerOperation(CreateDatabaseDesc.class, CreateDatabaseOperation.class); } - public CreateDatabaseDesc(String databaseName, String comment, - String locationUri, boolean ifNotExists) { - super(); + private final String databaseName; + private final String comment; + private final String locationUri; + private final boolean ifNotExists; + private final Map dbProperties; + + public CreateDatabaseDesc(String databaseName, String comment, String locationUri, boolean ifNotExists, + Map dbProperties) { this.databaseName = databaseName; this.comment = comment; this.locationUri = locationUri; this.ifNotExists = ifNotExists; - this.dbProperties = null; + this.dbProperties = dbProperties; } @Explain(displayName="if not exists", displayOnlyOnTrue = true) @@ -59,42 +57,22 @@ public boolean getIfNotExists() { return ifNotExists; } - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - public Map getDatabaseProperties() { return dbProperties; } - public void setDatabaseProperties(Map dbProps) { - this.dbProperties = dbProps; - } - @Explain(displayName="name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getName() { return databaseName; } - public void setName(String databaseName) { - this.databaseName = databaseName; - } - @Explain(displayName="comment") public String getComment() { return comment; } - public void setComment(String comment) { - this.comment = comment; - } - @Explain(displayName="locationUri") public String getLocationUri() { return locationUri; } - - public void setLocationUri(String locationUri) { - this.locationUri = locationUri; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java new file mode 100644 index 0000000000..ed8da551ff --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of creating a database. 
+ */ +public class CreateDatabaseOperation extends DDLOperation { + private static final String DATABASE_PATH_SUFFIX = ".db"; + + private final CreateDatabaseDesc desc; + + public CreateDatabaseOperation(DDLOperationContext context, CreateDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Database database = new Database(); + database.setName(desc.getName()); + database.setDescription(desc.getComment()); + database.setLocationUri(desc.getLocationUri()); + database.setParameters(desc.getDatabaseProperties()); + database.setOwnerName(SessionState.getUserFromAuthenticator()); + database.setOwnerType(PrincipalType.USER); + + try { + makeLocationQualified(database); + context.getDb().createDatabase(database, desc.getIfNotExists()); + } catch (AlreadyExistsException ex) { + //it would be better if AlreadyExistsException had an errorCode field.... + throw new HiveException(ex, ErrorMsg.DATABASE_ALREADY_EXISTS, desc.getName()); + } + + return 0; + } + + private void makeLocationQualified(Database database) throws HiveException { + if (database.isSetLocationUri()) { + database.setLocationUri(Utilities.getQualifiedPath(context.getConf(), new Path(database.getLocationUri()))); + } else { + // Location is not set we utilize METASTOREWAREHOUSE together with database name + Path path = new Path(HiveConf.getVar(context.getConf(), HiveConf.ConfVars.METASTOREWAREHOUSE), + database.getName().toLowerCase() + DATABASE_PATH_SUFFIX); + String qualifiedPath = Utilities.getQualifiedPath(context.getConf(), path); + database.setLocationUri(qualifiedPath); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseDesc.java similarity index 60% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseDesc.java index 6bc34eda0f..948e24bd4f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseDesc.java @@ -16,93 +16,51 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * DescDatabaseDesc. - * + * DDL task description for DESC DATABASE commands. */ @Explain(displayName = "Describe Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DescDatabaseDesc extends DDLDesc implements Serializable { - +public class DescDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String dbName; - String resFile; - boolean isExt; + public static final String DESC_DATABASE_SCHEMA = + "db_name,comment,location,owner_name,owner_type,parameters#string:string:string:string:string:string"; - /** - * thrift ddl for the result of describe database. 
- */ - private static final String schema = "db_name,comment,location,owner_name,owner_type,parameters#string:string:string:string:string:string"; - - public DescDatabaseDesc() { + static { + DDLTask2.registerOperation(DescDatabaseDesc.class, DescDatabaseOperation.class); } - /** - * @param resFile - * @param dbName - * @param isExt - */ + private final String resFile; + private final String dbName; + private final boolean isExt; + public DescDatabaseDesc(Path resFile, String dbName, boolean isExt) { this.isExt = isExt; this.resFile = resFile.toString(); this.dbName = dbName; } - public static String getSchema() { - return schema; - } - - /** - * @return the isExt - */ public boolean isExt() { return isExt; } - /** - * @param isExt - * the isExt to set - */ - public void setExt(boolean isExt) { - this.isExt = isExt; - } - - /** - * @return the tableName - */ @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDatabaseName() { return dbName; } - /** - * @param db - * the database name to set - */ - public void setDatabaseName(String db) { - this.dbName = db; - } - - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java new file mode 100644 index 0000000000..efaf389ee8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import java.io.DataOutputStream; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of describing a database. 
+ */ +public class DescDatabaseOperation extends DDLOperation { + private final DescDatabaseDesc desc; + + public DescDatabaseOperation(DDLOperationContext context, DescDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + try (DataOutputStream outStream = getOutputStream(new Path(desc.getResFile()))) { + Database database = context.getDb().getDatabase(desc.getDatabaseName()); + if (database == null) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName()); + } + + Map params = null; + if (desc.isExt()) { + params = database.getParameters(); + } + + // If this is a q-test, let's order the params map (lexicographically) by + // key. This is to get consistent param ordering between Java7 and Java8. + if (HiveConf.getBoolVar(context.getConf(), HiveConf.ConfVars.HIVE_IN_TEST) && params != null) { + params = new TreeMap(params); + } + + String location = database.getLocationUri(); + if (HiveConf.getBoolVar(context.getConf(), HiveConf.ConfVars.HIVE_IN_TEST)) { + location = "location/in/test"; + } + + PrincipalType ownerType = database.getOwnerType(); + context.getFormatter().showDatabaseDescription(outStream, database.getName(), database.getDescription(), + location, database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseDesc.java similarity index 74% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseDesc.java index deaa7cde1d..933c3c652f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseDesc.java @@ -16,35 +16,37 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * DropDatabaseDesc. - * + * DDL task description for DROP DATABASE commands. 
*/ @Explain(displayName = "Drop Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropDatabaseDesc extends DDLDesc implements Serializable { +public class DropDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String databaseName; - boolean ifExists; - boolean cascade; - ReplicationSpec replicationSpec; + static { + DDLTask2.registerOperation(DropDatabaseDesc.class, DropDatabaseOperation.class); + } + + private final String databaseName; + private final boolean ifExists; + private final boolean cascade; + private final ReplicationSpec replicationSpec; - public DropDatabaseDesc(String databaseName, boolean ifExists, - ReplicationSpec replicationSpec) { + public DropDatabaseDesc(String databaseName, boolean ifExists, ReplicationSpec replicationSpec) { this(databaseName, ifExists, false, replicationSpec); } - public DropDatabaseDesc(String databaseName, boolean ifExists, boolean cascade, - ReplicationSpec replicationSpec) { - super(); + public DropDatabaseDesc(String databaseName, boolean ifExists, boolean cascade, ReplicationSpec replicationSpec) { this.databaseName = databaseName; this.ifExists = ifExists; this.cascade = cascade; @@ -56,27 +58,15 @@ public String getDatabaseName() { return databaseName; } - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; - } - @Explain(displayName = "if exists") public boolean getIfExists() { return ifExists; } - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - public boolean isCasdade() { return cascade; } - public void setIsCascade(boolean cascade) { - this.cascade = cascade; - } - public ReplicationSpec getReplicationSpec() { return replicationSpec; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseOperation.java new file mode 100644 index 0000000000..d9e50083f0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DropDatabaseOperation.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Operation process of creating a database. 
+ */ +public class DropDatabaseOperation extends DDLOperation { + private final DropDatabaseDesc desc; + + public DropDatabaseOperation(DDLOperationContext context, DropDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + try { + String dbName = desc.getDatabaseName(); + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (replicationSpec.isInReplicationScope()) { + Database database = context.getDb().getDatabase(dbName); + if (database == null || !replicationSpec.allowEventReplacementInto(database.getParameters())) { + return 0; + } + } + + context.getDb().dropDatabase(dbName, true, desc.getIfExists(), desc.isCasdade()); + // Unregister the functions as well + if (desc.isCasdade()) { + FunctionRegistry.unregisterPermanentFunctions(dbName); + } + } catch (NoSuchObjectException ex) { + throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName()); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LockDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseDesc.java similarity index 73% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/LockDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseDesc.java index 08ce59ee51..5194c890ac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LockDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseDesc.java @@ -16,32 +16,36 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; -import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * LockDatabaseDesc. - * + * DDL task description for LOCK DATABASE commands. 
*/ @Explain(displayName = "Lock Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class LockDatabaseDesc extends DDLDesc implements Serializable { +public class LockDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private String databaseName; - private String mode; - private String queryId; - private String queryStr; - - public LockDatabaseDesc() { + static { + DDLTask2.registerOperation(LockDatabaseDesc.class, LockDatabaseOperation.class); } - public LockDatabaseDesc(String databaseName, String mode, String queryId) { + private final String databaseName; + private final String mode; + private final String queryId; + private final String queryStr; + + public LockDatabaseDesc(String databaseName, String mode, String queryId, String queryStr) { this.databaseName = databaseName; this.mode = mode; this.queryId = queryId; + this.queryStr = queryStr; } @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -49,14 +53,6 @@ public String getDatabaseName() { return databaseName; } - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; - } - - public void setMode(String mode) { - this.mode = mode; - } - public String getMode() { return mode; } @@ -65,15 +61,7 @@ public String getQueryId() { return queryId; } - public void setQueryId(String queryId) { - this.queryId = queryId; - } - public String getQueryStr() { return queryStr; } - - public void setQueryStr(String queryStr) { - this.queryStr = queryStr; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseOperation.java new file mode 100644 index 0000000000..cf01a31785 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/LockDatabaseOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of locking a database. 
+ */ +public class LockDatabaseOperation extends DDLOperation { + private final LockDatabaseDesc desc; + + public LockDatabaseOperation(DDLOperationContext context, LockDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.lockDatabase(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesDesc.java similarity index 59% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesDesc.java index 8724c747ab..4814fd3e8c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowDatabasesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesDesc.java @@ -16,91 +16,50 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * ShowDatabasesDesc. - * + * DDL task description for SHOW DATABASES commands. */ @Explain(displayName = "Show Databases", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowDatabasesDesc extends DDLDesc implements Serializable { +public class ShowDatabasesDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String pattern; - String resFile; - - /** - * table name for the result of show databases. - */ - private static final String table = "show_databases"; - /** - * thrift ddl for the result of show databases. - */ - private static final String schema = "database_name#string"; + /** Thrift ddl for the result of show databases. 
*/ public static final String SHOW_DATABASES_SCHEMA = "database_name#string"; - public String getTable() { - return table; + static { + DDLTask2.registerOperation(ShowDatabasesDesc.class, ShowDatabasesOperation.class); } - public String getSchema() { - return schema; - } - - public ShowDatabasesDesc() { - } + private final String resFile; + private final String pattern; - /** - * @param resFile - */ public ShowDatabasesDesc(Path resFile) { this.resFile = resFile.toString(); - pattern = null; + this.pattern = null; } - /** - * @param pattern - * names of databases to show - */ public ShowDatabasesDesc(Path resFile, String pattern) { this.resFile = resFile.toString(); this.pattern = pattern; } - /** - * @return the pattern - */ @Explain(displayName = "pattern") public String getPattern() { return pattern; } - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java new file mode 100644 index 0000000000..30c4db8f3c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import java.io.DataOutputStream; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process of showing databases.
+ */ +public class ShowDatabasesOperation extends DDLOperation { + private final ShowDatabasesDesc desc; + + public ShowDatabasesOperation(DDLOperationContext context, ShowDatabasesDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // get the databases for the desired pattern - populate the output stream + List databases = null; + if (desc.getPattern() != null) { + LOG.debug("pattern: {}", desc.getPattern()); + databases = context.getDb().getDatabasesByPattern(desc.getPattern()); + } else { + databases = context.getDb().getAllDatabases(); + } + + LOG.info("Found {} database(s) matching the SHOW DATABASES statement.", databases.size()); + + // write the results in the file + DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + try { + context.getFormatter().showDatabases(outStream, databases); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show databases"); + } finally { + IOUtils.closeStream(outStream); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseDesc.java similarity index 75% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseDesc.java index da7d20062c..258b0366dc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/SwitchDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseDesc.java @@ -16,28 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; -import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * SwitchDatabaseDesc. - * + * DDL task description for USE commands. */ @Explain(displayName = "Switch Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class SwitchDatabaseDesc extends DDLDesc implements Serializable { - +public class SwitchDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String databaseName; - - public SwitchDatabaseDesc() { + static { + DDLTask2.registerOperation(SwitchDatabaseDesc.class, SwitchDatabaseOperation.class); } + private final String databaseName; + public SwitchDatabaseDesc(String databaseName) { - super(); this.databaseName = databaseName; } @@ -45,8 +46,4 @@ public SwitchDatabaseDesc(String databaseName) { public String getDatabaseName() { return databaseName; } - - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseOperation.java new file mode 100644 index 0000000000..6e3d4faaf1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/SwitchDatabaseOperation.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import java.util.Map; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of switching to another database. + */ +public class SwitchDatabaseOperation extends DDLOperation { + private final SwitchDatabaseDesc desc; + + public SwitchDatabaseOperation(DDLOperationContext context, SwitchDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + String dbName = desc.getDatabaseName(); + if (!context.getDb().databaseExists(dbName)) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + SessionState.get().setCurrentDatabase(dbName); + + // set database specific parameters + Database database = context.getDb().getDatabase(dbName); + assert(database != null); + + Map dbParams = database.getParameters(); + if (dbParams != null) { + for (HiveConf.ConfVars var: HiveConf.dbVars) { + String newValue = dbParams.get(var.varname); + if (newValue != null) { + LOG.info("Changing {} from {} to {}", var.varname, context.getConf().getVar(var), newValue); + context.getConf().setVar(var, newValue); + } + } + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseDesc.java similarity index 74% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockDatabaseDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseDesc.java index a1cb7976e7..f105bfaa81 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseDesc.java @@ -16,21 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.database; import java.io.Serializable; -import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * UnlockDatabaseDesc. - * + * DDL task description for UNLOCK DATABASE commands. 
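+ * <p> + * Editor's note, not part of the original patch: an assumed end-to-end sketch assembled only from pieces shown elsewhere in this diff. + * DDLSemanticAnalyzer#analyzeUnlockDatabase builds this desc and wraps it in a DDLWork2, e.g. + * <pre> + *   UnlockDatabaseDesc desc = new UnlockDatabaseDesc(dbName); + *   DDLWork2 work = new DDLWork2(getInputs(), getOutputs(), desc); + *   rootTasks.add(TaskFactory.get(work)); + *   // at run time the registered UnlockDatabaseOperation delegates to the transaction manager: + *   // ctx.getHiveTxnManager().unlockDatabase(db, desc) + * </pre>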
*/ @Explain(displayName = "Unlock Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class UnlockDatabaseDesc extends DDLDesc implements Serializable { +public class UnlockDatabaseDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private String databaseName; + static { + DDLTask2.registerOperation(UnlockDatabaseDesc.class, UnlockDatabaseOperation.class); + } + + private final String databaseName; public UnlockDatabaseDesc(String databaseName) { this.databaseName = databaseName; @@ -40,9 +46,4 @@ public UnlockDatabaseDesc(String databaseName) { public String getDatabaseName() { return databaseName; } - - public void setDatabaseName(String databaseName) { - this.databaseName = databaseName; - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseOperation.java new file mode 100644 index 0000000000..91ca0b00d9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/UnlockDatabaseOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of unlocking a database. + */ +public class UnlockDatabaseOperation extends DDLOperation { + private final UnlockDatabaseDesc desc; + + public UnlockDatabaseOperation(DDLOperationContext context, UnlockDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.unlockDatabase(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java new file mode 100644 index 0000000000..53d733aaf7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Database related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.database; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java new file mode 100644 index 0000000000..9e79c3678d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** DDL operations. */ +package org.apache.hadoop.hive.ql.ddl; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 0bfff08a80..76339f1b82 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -79,7 +79,6 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -177,7 +176,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc; import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; @@ -188,7 +186,6 @@ import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColStatistics; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; @@ -198,10 +195,8 @@ import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; import 
org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; @@ -214,7 +209,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; @@ -233,7 +227,6 @@ import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; -import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; @@ -243,10 +236,8 @@ import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; -import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; @@ -353,41 +344,6 @@ public int execute(DriverContext driverContext) { try { db = Hive.get(conf); - CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc(); - if (null != createDatabaseDesc) { - return createDatabase(db, createDatabaseDesc); - } - - DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc(); - if (dropDatabaseDesc != null) { - return dropDatabase(db, dropDatabaseDesc); - } - - LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc(); - if (lockDatabaseDesc != null) { - return lockDatabase(db, lockDatabaseDesc); - } - - UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc(); - if (unlockDatabaseDesc != null) { - return unlockDatabase(db, unlockDatabaseDesc); - } - - SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc(); - if (switchDatabaseDesc != null) { - return switchDatabase(db, switchDatabaseDesc); - } - - DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc(); - if (descDatabaseDesc != null) { - return descDatabase(db, descDatabaseDesc); - } - - AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc(); - if (alterDatabaseDesc != null) { - return alterDatabase(db, alterDatabaseDesc); - } - CreateTableDesc crtTbl = work.getCreateTblDesc(); if (crtTbl != null) { return createTable(db, crtTbl); @@ -464,11 +420,6 @@ public int execute(DriverContext driverContext) { return describeFunction(db, descFunc); } - ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc(); - if (showDatabases != null) { - return showDatabases(db, showDatabases); - } - ShowTablesDesc showTbls = work.getShowTblsDesc(); if (showTbls != null) { return showTablesOrViews(db, showTbls); @@ -1180,69 +1131,6 @@ private void writeListToFileAfterSort(List entries, String resFile) thro writeToFile(sb.toString(), resFile); } - private int alterDatabase(Hive 
db, AlterDatabaseDesc alterDbDesc) throws HiveException { - - String dbName = alterDbDesc.getDatabaseName(); - Database database = db.getDatabase(dbName); - if (database == null) { - throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); - } - - Map params = database.getParameters(); - if ((null != alterDbDesc.getReplicationSpec()) - && !alterDbDesc.getReplicationSpec().allowEventReplacementInto(params)) { - LOG.debug("DDLTask: Alter Database {} is skipped as database is newer than update", dbName); - return 0; // no replacement, the existing database state is newer than our update. - } - - switch (alterDbDesc.getAlterType()) { - case ALTER_PROPERTY: - Map newParams = alterDbDesc.getDatabaseProperties(); - - // if both old and new params are not null, merge them - if (params != null && newParams != null) { - params.putAll(newParams); - database.setParameters(params); - } else { - // if one of them is null, replace the old params with the new one - database.setParameters(newParams); - } - break; - - case ALTER_OWNER: - database.setOwnerName(alterDbDesc.getOwnerPrincipal().getName()); - database.setOwnerType(alterDbDesc.getOwnerPrincipal().getType()); - break; - - case ALTER_LOCATION: - try { - String newLocation = alterDbDesc.getLocation(); - URI locationURI = new URI(newLocation); - if ( !locationURI.isAbsolute() - || StringUtils.isBlank(locationURI.getScheme())) { - throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation); - } - if (newLocation.equals(database.getLocationUri())) { - LOG.info("AlterDatabase skipped. No change in location."); - } - else { - database.setLocationUri(newLocation); - } - } - catch (URISyntaxException e) { - throw new HiveException(e); - } - break; - - default: - throw new AssertionError("Unsupported alter database type! : " + alterDbDesc.getAlterType()); - } - - db.alterDatabase(database.getName(), database); - return 0; - } - - /** * Alters a materialized view. * @@ -2512,39 +2400,6 @@ public static StringBuilder appendSerdeParams( return builder; } - - /** - * Write a list of the available databases to a file. - * - * @param showDatabasesDesc - * These are the databases we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException { - // get the databases for the desired pattern - populate the output stream - List databases = null; - if (showDatabasesDesc.getPattern() != null) { - LOG.debug("pattern: {}", showDatabasesDesc.getPattern()); - databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern()); - } else { - databases = db.getAllDatabases(); - } - LOG.info("Found {} database(s) matching the SHOW DATABASES statement.", databases.size()); - - // write the results in the file - DataOutputStream outStream = getOutputStream(showDatabasesDesc.getResFile()); - try { - formatter.showDatabases(outStream, databases); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show databases"); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - /** * Write a list of the tables/views in the database to a file. 
* @@ -3110,36 +2965,6 @@ private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException { return txnManager.lockTable(db, lockTbl); } - /** - * Lock the database - * - * @param lockDb - * the database to be locked along with the mode - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int lockDatabase(Hive db, LockDatabaseDesc lockDb) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.lockDatabase(db, lockDb); - } - - /** - * Unlock the database specified - * - * @param unlockDb - * the database to be unlocked - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int unlockDatabase(Hive db, UnlockDatabaseDesc unlockDb) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.unlockDatabase(db, unlockDb); - } - /** * Unlock the table/partition specified * @param db @@ -3230,43 +3055,6 @@ private int describeFunction(Hive db, DescFunctionDesc descFunc) throws HiveExce return 0; } - private int descDatabase(Hive db, DescDatabaseDesc descDatabase) throws HiveException { - DataOutputStream outStream = getOutputStream(descDatabase.getResFile()); - try { - Database database = db.getDatabase(descDatabase.getDatabaseName()); - - if (database == null) { - throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, descDatabase.getDatabaseName()); - } - Map params = null; - if (descDatabase.isExt()) { - params = database.getParameters(); - } - - // If this is a q-test, let's order the params map (lexicographically) by - // key. This is to get consistent param ordering between Java7 and Java8. - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST) && - params != null) { - params = new TreeMap(params); - } - - String location = database.getLocationUri(); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { - location = "location/in/test"; - } - PrincipalType ownerType = database.getOwnerType(); - formatter.showDatabaseDescription(outStream, database.getName(), - database.getDescription(), location, - database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params); - - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - /** * Write the status of tables to a file. * @@ -4607,96 +4395,6 @@ public static void validateSerDe(String serdeName, HiveConf conf) throws HiveExc } } - /** - * Create a Database - * @param db - * @param crtDb - * @return Always returns 0 - * @throws HiveException - */ - private int createDatabase(Hive db, CreateDatabaseDesc crtDb) - throws HiveException { - Database database = new Database(); - database.setName(crtDb.getName()); - database.setDescription(crtDb.getComment()); - database.setLocationUri(crtDb.getLocationUri()); - database.setParameters(crtDb.getDatabaseProperties()); - database.setOwnerName(SessionState.getUserFromAuthenticator()); - database.setOwnerType(PrincipalType.USER); - try { - makeLocationQualified(database); - db.createDatabase(database, crtDb.getIfNotExists()); - } - catch (AlreadyExistsException ex) { - //it would be better if AlreadyExistsException had an errorCode field.... 
- throw new HiveException(ex, ErrorMsg.DATABASE_ALREADY_EXISTS, crtDb.getName()); - } - return 0; - } - - /** - * Drop a Database - * @param db - * @param dropDb - * @return Always returns 0 - * @throws HiveException - */ - private int dropDatabase(Hive db, DropDatabaseDesc dropDb) - throws HiveException { - try { - String dbName = dropDb.getDatabaseName(); - ReplicationSpec replicationSpec = dropDb.getReplicationSpec(); - if (replicationSpec.isInReplicationScope()) { - Database database = db.getDatabase(dbName); - if (database == null - || !replicationSpec.allowEventReplacementInto(database.getParameters())) { - return 0; - } - } - db.dropDatabase(dbName, true, dropDb.getIfExists(), dropDb.isCasdade()); - // Unregister the functions as well - if (dropDb.isCasdade()) { - FunctionRegistry.unregisterPermanentFunctions(dbName); - } - } catch (NoSuchObjectException ex) { - throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, dropDb.getDatabaseName()); - } - return 0; - } - - /** - * Switch to a different Database - * @param db - * @param switchDb - * @return Always returns 0 - * @throws HiveException - */ - private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb) - throws HiveException { - String dbName = switchDb.getDatabaseName(); - if (!db.databaseExists(dbName)) { - throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); - } - SessionState.get().setCurrentDatabase(dbName); - - // set database specific parameters - Database database = db.getDatabase(dbName); - assert(database != null); - Map dbParams = database.getParameters(); - if (dbParams != null) { - for (HiveConf.ConfVars var: HiveConf.dbVars) { - String newValue = dbParams.get(var.varname); - if (newValue != null) { - LOG.info("Changing {} from {} to {}", var.varname, conf.getVar(var), - newValue); - conf.setVar(var, newValue); - } - } - } - - return 0; - } - /** * Create a new table. * @@ -5132,25 +4830,6 @@ public static void makeLocationQualified(String databaseName, Table table, HiveC } } - /** - * Make qualified location for a database . - * - * @param database - * Database. 
- */ - public static final String DATABASE_PATH_SUFFIX = ".db"; - private void makeLocationQualified(Database database) throws HiveException { - if (database.isSetLocationUri()) { - database.setLocationUri(Utilities.getQualifiedPath(conf, new Path(database.getLocationUri()))); - } - else { - // Location is not set we utilize METASTOREWAREHOUSE together with database name - database.setLocationUri( - Utilities.getQualifiedPath(conf, new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), - database.getName().toLowerCase() + DATABASE_PATH_SUFFIX))); - } - } - /** * Validate if the given table/partition is eligible for update * diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index 1a412a3a86..d6dc9a9acc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -534,6 +534,7 @@ system.registerGenericUDTF("posexplode", GenericUDTFPosExplode.class); system.registerGenericUDTF("stack", GenericUDTFStack.class); system.registerGenericUDTF("get_splits", GenericUDTFGetSplits.class); + system.registerGenericUDTF("get_sql_schema", GenericUDTFGetSQLSchema.class); //PTF declarations system.registerGenericUDF(LEAD_FUNC_NAME, GenericUDFLead.class); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index e2feb3f988..583460f2c4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -500,8 +500,7 @@ private int getSize(int pos, Class c, Field f) { || c.isInstance(Short.valueOf((short) 0)) || c.isInstance(Integer.valueOf(0)) || c.isInstance(Long.valueOf(0)) - || c.isInstance(new Float(0)) - || c.isInstance(new Double(0))) { + || c.isInstance(Float.valueOf(0)) || c.isInstance(Double.valueOf(0))) { return javaSizePrimitiveType; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 8e6fdc0707..6ac695d502 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -23,6 +23,8 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; import org.apache.hadoop.hive.ql.exec.repl.ReplDumpTask; @@ -88,6 +90,7 @@ public TaskTuple(Class workClass, Class> taskClass) { taskvec.add(new TaskTuple(CopyWork.class, CopyTask.class)); taskvec.add(new TaskTuple(ReplCopyWork.class, ReplCopyTask.class)); taskvec.add(new TaskTuple(DDLWork.class, DDLTask.class)); + taskvec.add(new TaskTuple(DDLWork2.class, DDLTask2.class)); taskvec.add(new TaskTuple( MaterializedViewDesc.class, MaterializedViewTask.class)); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java index 9ec820d500..d2ca33d96c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java @@ -646,7 +646,7 @@ private void computeReducerTimeStatsPerJob(RunningJob rj) throws IOException { Map exctractedCounters = new HashMap(); for 
(Counters.Group cg : counters) { for (Counter c : cg) { - exctractedCounters.put(cg.getName() + "::" + c.getName(), new Double(c.getCounter())); + exctractedCounters.put(cg.getName() + "::" + c.getName(), Double.valueOf(c.getCounter())); } } return exctractedCounters; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java index d6ccf5872e..c7828db1df 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java @@ -20,6 +20,9 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.DatabaseEvent; @@ -28,9 +31,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.ReplLoadOpType; @@ -123,17 +123,13 @@ private boolean isDbEmpty(String dbName) throws HiveException { } private Task createDbTask(Database dbObj) { - CreateDatabaseDesc createDbDesc = new CreateDatabaseDesc(); - createDbDesc.setName(dbObj.getName()); - createDbDesc.setComment(dbObj.getDescription()); - createDbDesc.setDatabaseProperties(updateDbProps(dbObj, context.dumpDirectory, !isTableLevelLoad)); - // note that we do not set location - for repl load, we want that auto-created. - createDbDesc.setIfNotExists(false); + CreateDatabaseDesc createDbDesc = new CreateDatabaseDesc(dbObj.getName(), dbObj.getDescription(), null, false, + updateDbProps(dbObj, context.dumpDirectory, !isTableLevelLoad)); // If it exists, we want this to be an error condition. Repl Load is not intended to replace a // db. // TODO: we might revisit this in create-drop-recreate cases, needs some thinking on. 
- DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), createDbDesc); + DDLWork2 work = new DDLWork2(new HashSet<>(), new HashSet<>(), createDbDesc); return TaskFactory.get(work, context.hiveConf); } @@ -143,10 +139,9 @@ private boolean isDbEmpty(String dbName) throws HiveException { } private Task setOwnerInfoTask(Database dbObj) { - AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(dbObj.getName(), - new PrincipalDesc(dbObj.getOwnerName(), dbObj.getOwnerType()), - null); - DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); + AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(dbObj.getName(), new PrincipalDesc(dbObj.getOwnerName(), + dbObj.getOwnerType()), null); + DDLWork2 work = new DDLWork2(new HashSet<>(), new HashSet<>(), alterDbDesc); return TaskFactory.get(work, context.hiveConf); } @@ -176,9 +171,8 @@ private boolean isDbEmpty(String dbName) throws HiveException { private static Task alterDbTask(String dbName, Map props, HiveConf hiveConf) { - AlterDatabaseDesc alterDbDesc = - new AlterDatabaseDesc(dbName, props, null); - DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); + AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(dbName, props, null); + DDLWork2 work = new DDLWork2(new HashSet<>(), new HashSet<>(), alterDbDesc); return TaskFactory.get(work, hiveConf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 393883367b..4ed215ce2f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo; import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; @@ -45,7 +47,6 @@ import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; @@ -302,7 +303,7 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState); AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(dbName, mapProp, new ReplicationSpec(replState, replState)); - Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf); + Task updateReplIdTask = TaskFactory.get(new DDLWork2(inputs, outputs, alterDbDesc), conf); // Link the update repl state task with dependency collection task if (preCursor != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/spark/MapInput.java ql/src/java/org/apache/hadoop/hive/ql/exec/spark/MapInput.java index b242f57db8..55096ad113 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/spark/MapInput.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/spark/MapInput.java @@ -100,7 +100,7 @@ public String getName() { @Override public Boolean isCacheEnable() { - return new Boolean(toCache); + return Boolean.valueOf(toCache); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ShuffleTran.java ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ShuffleTran.java index f69807954b..26dcf7f545 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ShuffleTran.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ShuffleTran.java @@ -71,7 +71,7 @@ public String getName() { @Override public Boolean isCacheEnable() { - return new Boolean(toCache); + return Boolean.valueOf(toCache); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index d9340d0371..1d38f99854 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -511,13 +511,17 @@ private EdgeProperty createEdgeProperty(Vertex w, TezEdgeProperty edgeProp, case CUSTOM_SIMPLE_EDGE: assert partitionerClassName != null; partitionerConf = createPartitionerConf(partitionerClassName, conf); - UnorderedPartitionedKVEdgeConfig et3Conf = UnorderedPartitionedKVEdgeConfig + UnorderedPartitionedKVEdgeConfig.Builder et3Conf = UnorderedPartitionedKVEdgeConfig .newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf) .setFromConfiguration(conf) .setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null) - .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null) - .build(); - return et3Conf.createDefaultEdgeProperty(); + .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null); + if (edgeProp.getBufferSize() != null) { + et3Conf.setAdditionalConfiguration( + TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_BUFFER_SIZE_MB, + edgeProp.getBufferSize().toString()); + } + return et3Conf.build().createDefaultEdgeProperty(); case ONE_TO_ONE_EDGE: UnorderedKVEdgeConfig et4Conf = UnorderedKVEdgeConfig .newBuilder(keyClass, valClass) diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 9ddd30c4be..4aea8722b6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -39,14 +39,14 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java 
ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 12c10273a9..616277fa8b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -19,18 +19,17 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; -import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.LockResponse; import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; +import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java index 623b0376c6..0abec56654 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; +import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -36,9 +38,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java index be34673b92..58a74900ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java @@ -72,6 +72,7 @@ protected HiveRelMdRowCount() { super(); } + @Override public Double getRowCount(Join join, RelMetadataQuery mq) { // Try to infer from constraints first final Pair constraintBasedResult = @@ -135,11 +136,11 @@ public Double getRowCount(Sort rel, RelMetadataQuery mq) { if (rowCount != null && rel.fetch != null) { final int offset = rel.offset == null ? 
0 : RexLiteral.intValue(rel.offset); final int limit = RexLiteral.intValue(rel.fetch); - final Double offsetLimit = new Double(offset + limit); + final int offsetLimit = offset + limit; // offsetLimit is smaller than rowCount of the input operator // thus, we return the offsetLimit if (offsetLimit < rowCount) { - return offsetLimit; + return Double.valueOf(offsetLimit); } } return rowCount; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java index 97097381d9..c1cd34478d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java @@ -71,7 +71,7 @@ private HiveRelMdSize() {} list.add(columnStatistic.getAvgColLen()); } } else { - list.add(new Double(0)); + list.add(Double.valueOf(0)); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java index 7ff92edd91..40cfcf5a8f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java @@ -154,7 +154,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException { aliasTotalKnownInputSize += size; Long es = aliasToSize.get(alias); if(es == null) { - es = new Long(0); + es = Long.valueOf(0); } es += size; aliasToSize.put(alias, es); @@ -207,6 +207,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException { * current parse context * @throws SemanticException */ + @Override public ParseContext transform(ParseContext pctx) throws SemanticException { pCtx = pctx; @@ -253,7 +254,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { private void analyzeReduceSinkOperatorsOfJoinOperator(JoinCondDesc[] joinConds, List> rsOps, Operator curentRsOp, Set correlatedRsOps) { - if (correlatedRsOps.contains((ReduceSinkOperator) curentRsOp)) { + if (correlatedRsOps.contains(curentRsOp)) { return; } correlatedRsOps.add((ReduceSinkOperator) curentRsOp); @@ -569,6 +570,7 @@ private boolean sameOrder(String order1, String order2) { return reduceSinkOperators; } + @Override public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... 
nodeOutputs) throws SemanticException { CorrelationNodeProcCtx corrCtx = (CorrelationNodeProcCtx) ctx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java index ed5ca5e642..0b5de81158 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java @@ -140,7 +140,7 @@ public long getTotalKnownInputSize(Context context, MapWork currWork, aliasTotalKnownInputSize += size; Long es = aliasToSize.get(alias); if (es == null) { - es = new Long(0); + es = Long.valueOf(0); } es += size; aliasToSize.put(alias, es); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java index 0a94254be8..0053997830 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java @@ -382,63 +382,56 @@ private boolean checkAggregators(Collection aggs) { public Object process(Node n, Stack s, NodeProcessorCtx c, Object... os) { LOG.debug("Cannot run operator [" + n + "] in llap mode."); - return new Boolean(false); + return Boolean.FALSE; } }); - opRules.put(new RuleRegExp("No user code in fil", FilterOperator.getOperatorName() + "%"), - new NodeProcessor() { - @Override - public Object process(Node n, Stack s, NodeProcessorCtx c, - Object... os) { - ExprNodeDesc expr = ((FilterOperator)n).getConf().getPredicate(); - Boolean retval = new Boolean(checkExpression(expr)); - if (!retval) { - LOG.info("Cannot run filter operator [" + n + "] in llap mode"); - } - return new Boolean(retval); + opRules.put(new RuleRegExp("No user code in fil", FilterOperator.getOperatorName() + "%"), new NodeProcessor() { + @Override + public Object process(Node n, Stack s, NodeProcessorCtx c, Object... os) { + ExprNodeDesc expr = ((FilterOperator) n).getConf().getPredicate(); + boolean retval = checkExpression(expr); + if (!retval) { + LOG.info("Cannot run filter operator [" + n + "] in llap mode"); } - }); - opRules.put(new RuleRegExp("No user code in gby", GroupByOperator.getOperatorName() + "%"), - new NodeProcessor() { - @Override - public Object process(Node n, Stack s, NodeProcessorCtx c, - Object... os) { - @SuppressWarnings("unchecked") - List aggs = ((Operator) n).getConf().getAggregators(); - Boolean retval = new Boolean(checkAggregators(aggs)); - if (!retval) { - LOG.info("Cannot run group by operator [" + n + "] in llap mode"); - } - return new Boolean(retval); + return Boolean.valueOf(retval); + } + }); + opRules.put(new RuleRegExp("No user code in gby", GroupByOperator.getOperatorName() + "%"), new NodeProcessor() { + @Override + public Object process(Node n, Stack s, NodeProcessorCtx c, Object... os) { + @SuppressWarnings("unchecked") + List aggs = ((Operator) n).getConf().getAggregators(); + boolean retval = checkAggregators(aggs); + if (!retval) { + LOG.info("Cannot run group by operator [" + n + "] in llap mode"); } - }); + return Boolean.valueOf(retval); + } + }); opRules.put(new RuleRegExp("No user code in select", SelectOperator.getOperatorName() + "%"), new NodeProcessor() { - @Override - public Object process(Node n, Stack s, NodeProcessorCtx c, - Object... 
os) { - @SuppressWarnings({ "unchecked" }) - List exprs = ((Operator) n).getConf().getColList(); - Boolean retval = new Boolean(checkExpressions(exprs)); - if (!retval) { - LOG.info("Cannot run select operator [" + n + "] in llap mode"); + @Override + public Object process(Node n, Stack s, NodeProcessorCtx c, Object... os) { + @SuppressWarnings({"unchecked"}) + List exprs = ((Operator) n).getConf().getColList(); + boolean retval = checkExpressions(exprs); + if (!retval) { + LOG.info("Cannot run select operator [" + n + "] in llap mode"); + } + return Boolean.valueOf(retval); } - return new Boolean(retval); - } - }); + }); if (!conf.getBoolVar(HiveConf.ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP)) { - opRules.put( - new RuleRegExp("Disable grace hash join if LLAP mode and not dynamic partition hash join", - MapJoinOperator.getOperatorName() + "%"), new NodeProcessor() { + opRules.put(new RuleRegExp("Disable grace hash join if LLAP mode and not dynamic partition hash join", + MapJoinOperator.getOperatorName() + "%"), new NodeProcessor() { @Override public Object process(Node n, Stack s, NodeProcessorCtx c, Object... os) { MapJoinOperator mapJoinOp = (MapJoinOperator) n; - if (mapJoinOp.getConf().isHybridHashJoin() - && !(mapJoinOp.getConf().isDynamicPartitionHashJoin())) { + if (mapJoinOp.getConf().isHybridHashJoin() && !(mapJoinOp.getConf().isDynamicPartitionHashJoin())) { mapJoinOpList.add((MapJoinOperator) n); } - return new Boolean(true); + return Boolean.TRUE; } }); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java index 73a01d131a..ec066efae4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java @@ -101,17 +101,15 @@ private void handleWork(TezWork tezWork, BaseWork work) // In LLAP only mode, grace hash join will be disabled later on by the LlapDispatcher anyway. // Since the presence of Grace Hash Join disables some "native" vectorization optimizations, // we will disable the grace hash join now, before vectorization is done. - opRules.put( - new RuleRegExp("Disable grace hash join if LLAP mode and not dynamic partition hash join", - MapJoinOperator.getOperatorName() + "%"), new NodeProcessor() { + opRules.put(new RuleRegExp("Disable grace hash join if LLAP mode and not dynamic partition hash join", + MapJoinOperator.getOperatorName() + "%"), new NodeProcessor() { @Override public Object process(Node n, Stack s, NodeProcessorCtx c, Object... 
os) { MapJoinOperator mapJoinOp = (MapJoinOperator) n; - if (mapJoinOp.getConf().isHybridHashJoin() - && !(mapJoinOp.getConf().isDynamicPartitionHashJoin())) { + if (mapJoinOp.getConf().isHybridHashJoin() && !(mapJoinOp.getConf().isDynamicPartitionHashJoin())) { mapJoinOp.getConf().setHybridHashJoin(false); } - return new Boolean(true); + return Boolean.TRUE; } }); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index d27a913c74..4a542aef85 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -70,6 +70,15 @@ import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; +import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -101,7 +110,6 @@ import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc.AlterMaterializedViewTypes; import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; @@ -114,7 +122,6 @@ import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; @@ -123,10 +130,8 @@ import org.apache.hadoop.hive.ql.plan.DDLDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; @@ -140,7 +145,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; @@ -153,7 +157,6 @@ import 
org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; -import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; @@ -164,10 +167,8 @@ import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; -import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; @@ -787,7 +788,7 @@ private void analyzeAlterDatabaseProperties(ASTNode ast) throws SemanticExceptio private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException { Database database = getDatabase(alterDesc.getDatabaseName()); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterDesc))); } private void analyzeAlterDatabaseOwner(ASTNode ast) throws SemanticException { @@ -1361,16 +1362,11 @@ private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { } } - CreateDatabaseDesc createDatabaseDesc = - new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists); - if (dbProps != null) { - createDatabaseDesc.setDatabaseProperties(dbProps); - } + CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists, dbProps); Database database = new Database(dbName, dbComment, dbLocation, dbProps); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - createDatabaseDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), createDatabaseDesc))); } private void analyzeDropDatabase(ASTNode ast) throws SemanticException { @@ -1414,9 +1410,8 @@ private void analyzeDropDatabase(ASTNode ast) throws SemanticException { inputs.add(new ReadEntity(database)); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE)); - DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade, - new ReplicationSpec()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc))); + DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade, new ReplicationSpec()); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), dropDatabaseDesc))); } private void analyzeSwitchDatabase(ASTNode ast) throws SemanticException { @@ -1426,8 +1421,7 @@ private void analyzeSwitchDatabase(ASTNode ast) throws SemanticException { dbReadEntity.noLockNeeded(); inputs.add(dbReadEntity); SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - switchDatabaseDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), switchDatabaseDesc))); } @@ -2571,11 +2565,10 @@ private void analyzeDescDatabase(ASTNode ast) 
throws SemanticException { throw new SemanticException("Unexpected Tokens at DESCRIBE DATABASE"); } - DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(), - dbName, isExtended); + DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(), dbName, isExtended); inputs.add(new ReadEntity(getDatabase(dbName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descDbDesc))); - setFetchTask(createFetchTask(descDbDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), descDbDesc))); + setFetchTask(createFetchTask(DescDatabaseDesc.DESC_DATABASE_SCHEMA)); } public static HashMap getPartSpec(ASTNode partspec) @@ -2658,8 +2651,8 @@ private void analyzeShowDatabases(ASTNode ast) throws SemanticException { } else { showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile()); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showDatabasesDesc))); - setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showDatabasesDesc))); + setFetchTask(createFetchTask(ShowDatabasesDesc.SHOW_DATABASES_SCHEMA)); } private void analyzeShowTables(ASTNode ast) throws SemanticException { @@ -3131,10 +3124,9 @@ private void analyzeLockDatabase(ASTNode ast) throws SemanticException { // DDL_NO_LOCK here, otherwise it will conflict with Hive's transaction. outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK)); - LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); - lockDatabaseDesc.setQueryStr(ctx.getCmd()); - DDLWork work = new DDLWork(getInputs(), getOutputs(), lockDatabaseDesc); + LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, HiveConf.getVar(conf, ConfVars.HIVEQUERYID), + ctx.getCmd()); + DDLWork2 work = new DDLWork2(getInputs(), getOutputs(), lockDatabaseDesc); rootTasks.add(TaskFactory.get(work)); ctx.setNeedLockMgr(true); } @@ -3150,7 +3142,7 @@ private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK)); UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName); - DDLWork work = new DDLWork(getInputs(), getOutputs(), unlockDatabaseDesc); + DDLWork2 work = new DDLWork2(getInputs(), getOutputs(), unlockDatabaseDesc); rootTasks.add(TaskFactory.get(work)); // Need to initialize the lock manager ctx.setNeedLockMgr(true); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 94879c9529..564fdca111 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; +import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -83,6 +84,7 @@ public static ReduceWork createReduceWork( context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MAX_PARTITION_FACTOR); float minPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MIN_PARTITION_FACTOR); long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + int 
defaultTinyBufferSize = context.conf.getIntVar(HiveConf.ConfVars.TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB); ReduceWork reduceWork = new ReduceWork(Utilities.REDUCENAME + context.nextSequenceNumber()); LOG.debug("Adding reduce work (" + reduceWork.getName() + ") for " + root); @@ -142,6 +144,7 @@ public static ReduceWork createReduceWork( edgeProp = new TezEdgeProperty(edgeType); edgeProp.setSlowStart(reduceWork.isSlowStart()); } + edgeProp.setBufferSize(obtainBufferSize(root, reduceSink, defaultTinyBufferSize)); reduceWork.setEdgePropRef(edgeProp); tezWork.connect( @@ -850,4 +853,23 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, egw.startWalking(startNodes, outputMap); return outputMap; } + + private static Integer obtainBufferSize(Operator op, ReduceSinkOperator rsOp, int defaultTinyBufferSize) { + if (op instanceof GroupByOperator) { + GroupByOperator groupByOperator = (GroupByOperator) op; + if (groupByOperator.getConf().getKeys().isEmpty() && + groupByOperator.getConf().getMode() == GroupByDesc.Mode.MERGEPARTIAL) { + // Check configuration and value is -1, infer value + int result = defaultTinyBufferSize == -1 ? + (int) Math.ceil((double) groupByOperator.getStatistics().getDataSize() / 1E6) : + defaultTinyBufferSize; + if (LOG.isDebugEnabled()) { + LOG.debug("Buffer size for output from operator {} can be set to {}Mb", rsOp, result); + } + return result; + } + } + return null; + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java index c5dfe7e9bf..7162375cdf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java @@ -20,13 +20,13 @@ import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.plan.PrincipalDesc; @@ -76,16 +76,14 @@ } newDbProps.put(key, entry.getValue()); } - alterDbDesc = new AlterDatabaseDesc(actualDbName, - newDbProps, context.eventOnlyReplicationSpec()); + alterDbDesc = new AlterDatabaseDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec()); } else { - alterDbDesc = new AlterDatabaseDesc(actualDbName, - new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), + alterDbDesc = new AlterDatabaseDesc(actualDbName, new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec()); } - Task alterDbTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, alterDbDesc), context.hiveConf); + Task alterDbTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, alterDbDesc), context.hiveConf); context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName); diff --git 
ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java index f8d8d1a696..41b6db6e9d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java @@ -23,20 +23,19 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import java.io.IOException; import java.io.Serializable; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -58,23 +57,22 @@ context.dbName == null ? db.getName() : context.dbName; CreateDatabaseDesc createDatabaseDesc = - new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, true); - createDatabaseDesc.setDatabaseProperties(db.getParameters()); - Task createDBTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc), context.hiveConf); + new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, true, db.getParameters()); + Task createDBTask = TaskFactory.get( + new DDLWork2(new HashSet<>(), new HashSet<>(), createDatabaseDesc), context.hiveConf); if (!db.getParameters().isEmpty()) { AlterDatabaseDesc alterDbDesc = new AlterDatabaseDesc(destinationDBName, db.getParameters(), context.eventOnlyReplicationSpec()); - Task alterDbProperties = TaskFactory - .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), context.hiveConf); + Task alterDbProperties = TaskFactory + .get(new DDLWork2(new HashSet<>(), new HashSet<>(), alterDbDesc), context.hiveConf); createDBTask.addDependentTask(alterDbProperties); } if (StringUtils.isNotEmpty(db.getOwnerName())) { AlterDatabaseDesc alterDbOwner = new AlterDatabaseDesc(destinationDBName, new PrincipalDesc(db.getOwnerName(), db.getOwnerType()), context.eventOnlyReplicationSpec()); - Task alterDbTask = TaskFactory - .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbOwner), context.hiveConf); + Task alterDbTask = TaskFactory + .get(new DDLWork2(new HashSet<>(), new HashSet<>(), alterDbOwner), context.hiveConf); createDBTask.addDependentTask(alterDbTask); } updatedMetadata diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java index 8b11a9ed42..fd1e0e1f2a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; +import 
org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import java.io.Serializable; import java.util.Collections; @@ -36,11 +36,9 @@ DropDatabaseMessage msg = deserializer.getDropDatabaseMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; - DropDatabaseDesc desc = - new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec()); + DropDatabaseDesc desc = new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec()); Task dropDBTask = - TaskFactory - .get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), context.hiveConf); + TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), desc), context.hiveConf); context.log.info( "Added drop database task : {}:{}", dropDBTask.getId(), desc.getDatabaseName()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index eb6011fc90..6527e52cae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -37,17 +37,11 @@ private PreInsertTableDesc preInsertTableDesc; private InsertCommitHookDesc insertCommitHookDesc; private AlterMaterializedViewDesc alterMVDesc; - private CreateDatabaseDesc createDatabaseDesc; - private SwitchDatabaseDesc switchDatabaseDesc; - private DropDatabaseDesc dropDatabaseDesc; - private LockDatabaseDesc lockDatabaseDesc; - private UnlockDatabaseDesc unlockDatabaseDesc; private CreateTableDesc createTblDesc; private CreateTableLikeDesc createTblLikeDesc; private CreateViewDesc createVwDesc; private DropTableDesc dropTblDesc; private AlterTableDesc alterTblDesc; - private ShowDatabasesDesc showDatabasesDesc; private ShowTablesDesc showTblsDesc; private ShowColumnsDesc showColumnsDesc; private ShowTblPropertiesDesc showTblPropertiesDesc; @@ -68,8 +62,6 @@ private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; private ShowTableStatusDesc showTblStatusDesc; - private DescDatabaseDesc descDbDesc; - private AlterDatabaseDesc alterDbDesc; private AlterTableAlterPartDesc alterTableAlterPartDesc; private TruncateTableDesc truncateTblDesc; private AlterTableExchangePartition alterTableExchangePartition; @@ -123,33 +115,6 @@ public DDLWork(HashSet inputs, HashSet outputs) { this.outputs = outputs; } - /** - * @param createDatabaseDesc - * Create Database descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - CreateDatabaseDesc createDatabaseDesc) { - this(inputs, outputs); - this.createDatabaseDesc = createDatabaseDesc; - } - - /** - * @param inputs - * @param outputs - * @param descDatabaseDesc Database descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - DescDatabaseDesc descDatabaseDesc) { - this(inputs, outputs); - this.descDbDesc = descDatabaseDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - AlterDatabaseDesc alterDbDesc) { - this(inputs, outputs); - this.alterDbDesc = alterDbDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, TruncateTableDesc truncateTblDesc) { this(inputs, outputs); @@ -162,30 +127,6 @@ public DDLWork(HashSet inputs, 
HashSet outputs, this.showConfDesc = showConfDesc; } - public DescDatabaseDesc getDescDatabaseDesc() { - return descDbDesc; - } - - /** - * @param dropDatabaseDesc - * Drop Database descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - DropDatabaseDesc dropDatabaseDesc) { - this(inputs, outputs); - this.dropDatabaseDesc = dropDatabaseDesc; - } - - /** - * @param switchDatabaseDesc - * Switch Database descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - SwitchDatabaseDesc switchDatabaseDesc) { - this(inputs, outputs); - this.switchDatabaseDesc = switchDatabaseDesc; - } - /** * @param alterTblDesc * alter table descriptor @@ -260,16 +201,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.descTblDesc = descTblDesc; } - /** - * @param showDatabasesDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowDatabasesDesc showDatabasesDesc) { - this(inputs, outputs); - - this.showDatabasesDesc = showDatabasesDesc; - } - /** * @param showTblsDesc */ @@ -310,24 +241,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.unlockTblDesc = unlockTblDesc; } - /** - * @param lockDatabaseDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - LockDatabaseDesc lockDatabaseDesc) { - this(inputs, outputs); - this.lockDatabaseDesc = lockDatabaseDesc; - } - - /** - * @param unlockDatabaseDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - UnlockDatabaseDesc unlockDatabaseDesc) { - this(inputs, outputs); - this.unlockDatabaseDesc = unlockDatabaseDesc; - } - /** * @param showFuncsDesc */ @@ -620,67 +533,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc; } - /** - * @return Create Database descriptor - */ - public CreateDatabaseDesc getCreateDatabaseDesc() { - return createDatabaseDesc; - } - - /** - * Set Create Database descriptor - * @param createDatabaseDesc - */ - public void setCreateDatabaseDesc(CreateDatabaseDesc createDatabaseDesc) { - this.createDatabaseDesc = createDatabaseDesc; - } - - /** - * @return Drop Database descriptor - */ - public DropDatabaseDesc getDropDatabaseDesc() { - return dropDatabaseDesc; - } - - /** - * Set Drop Database descriptor - * @param dropDatabaseDesc - */ - public void setDropDatabaseDesc(DropDatabaseDesc dropDatabaseDesc) { - this.dropDatabaseDesc = dropDatabaseDesc; - } - - /** - * @return Switch Database descriptor - */ - public SwitchDatabaseDesc getSwitchDatabaseDesc() { - return switchDatabaseDesc; - } - - /** - * Set Switch Database descriptor - * @param switchDatabaseDesc - */ - public void setSwitchDatabaseDesc(SwitchDatabaseDesc switchDatabaseDesc) { - this.switchDatabaseDesc = switchDatabaseDesc; - } - - public LockDatabaseDesc getLockDatabaseDesc() { - return lockDatabaseDesc; - } - - public void setLockDatabaseDesc(LockDatabaseDesc lockDatabaseDesc) { - this.lockDatabaseDesc = lockDatabaseDesc; - } - - public UnlockDatabaseDesc getUnlockDatabaseDesc() { - return unlockDatabaseDesc; - } - - public void setUnlockDatabaseDesc(UnlockDatabaseDesc unlockDatabaseDesc) { - this.unlockDatabaseDesc = unlockDatabaseDesc; - } - /** * @return the createTblDesc */ @@ -689,14 +541,6 @@ public CreateTableDesc getCreateTblDesc() { return createTblDesc; } - /** - * @param createTblDesc - * the createTblDesc to set - */ - public void setCreateTblDesc(CreateTableDesc createTblDesc) { - this.createTblDesc = createTblDesc; - } - /** * @return the createTblDesc */ @@ -705,14 +549,6 @@ public CreateTableLikeDesc getCreateTblLikeDesc() { return 
createTblLikeDesc; } - /** - * @param createTblLikeDesc - * the createTblDesc to set - */ - public void setCreateTblLikeDesc(CreateTableLikeDesc createTblLikeDesc) { - this.createTblLikeDesc = createTblLikeDesc; - } - /** * @return the createTblDesc */ @@ -721,14 +557,6 @@ public CreateViewDesc getCreateViewDesc() { return createVwDesc; } - /** - * @param createVwDesc - * the createViewDesc to set - */ - public void setCreateViewDesc(CreateViewDesc createVwDesc) { - this.createVwDesc = createVwDesc; - } - /** * @return the dropTblDesc */ @@ -737,14 +565,6 @@ public DropTableDesc getDropTblDesc() { return dropTblDesc; } - /** - * @param dropTblDesc - * the dropTblDesc to set - */ - public void setDropTblDesc(DropTableDesc dropTblDesc) { - this.dropTblDesc = dropTblDesc; - } - /** * @return the alterTblDesc */ @@ -753,13 +573,6 @@ public AlterTableDesc getAlterTblDesc() { return alterTblDesc; } - /** - * @param alterTblDesc - * the alterTblDesc to set - */ - public void setAlterTblDesc(AlterTableDesc alterTblDesc) { - this.alterTblDesc = alterTblDesc; - } /** * @return the alterMVDesc @@ -769,30 +582,6 @@ public AlterMaterializedViewDesc getAlterMaterializedViewDesc() { return alterMVDesc; } - /** - * @param alterMVDesc - * the alterMVDesc to set - */ - public void setAlterMVDesc(AlterMaterializedViewDesc alterMVDesc) { - this.alterMVDesc = alterMVDesc; - } - - /** - * @return the showDatabasesDesc - */ - @Explain(displayName = "Show Databases Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowDatabasesDesc getShowDatabasesDesc() { - return showDatabasesDesc; - } - - /** - * @param showDatabasesDesc - * the showDatabasesDesc to set - */ - public void setShowDatabasesDesc(ShowDatabasesDesc showDatabasesDesc) { - this.showDatabasesDesc = showDatabasesDesc; - } - /** * @return the showTblsDesc */ @@ -801,14 +590,6 @@ public ShowTablesDesc getShowTblsDesc() { return showTblsDesc; } - /** - * @param showTblsDesc - * the showTblsDesc to set - */ - public void setShowTblsDesc(ShowTablesDesc showTblsDesc) { - this.showTblsDesc = showTblsDesc; - } - /** * @return the showColumnsDesc */ @@ -817,14 +598,6 @@ public ShowColumnsDesc getShowColumnsDesc() { return showColumnsDesc; } - /** - * @param showColumnsDesc - * the showColumnsDesc to set - */ - public void setShowColumnsDesc(ShowColumnsDesc showColumnsDesc) { - this.showColumnsDesc = showColumnsDesc; - } - /** * @return the showFuncsDesc */ @@ -885,62 +658,6 @@ public KillQueryDesc getKillQueryDesc() { return killQueryDesc; } - /** - * @param showFuncsDesc - * the showFuncsDesc to set - */ - public void setShowFuncsDesc(ShowFunctionsDesc showFuncsDesc) { - this.showFuncsDesc = showFuncsDesc; - } - - /** - * @param showLocksDesc - * the showLocksDesc to set - */ - public void setShowLocksDesc(ShowLocksDesc showLocksDesc) { - this.showLocksDesc = showLocksDesc; - } - - public void setShowCompactionsDesc(ShowCompactionsDesc showCompactionsDesc) { - this.showCompactionsDesc = showCompactionsDesc; - } - - public void setShowTxnsDesc(ShowTxnsDesc showTxnsDesc) { - this.showTxnsDesc = showTxnsDesc; - } - - public void setAbortTxnsDesc(AbortTxnsDesc abortTxnsDesc) { - this.abortTxnsDesc = abortTxnsDesc; - } - - public void setKillQueryDesc(KillQueryDesc killQueryDesc) { - this.killQueryDesc = killQueryDesc; - } - - /** - * @param lockTblDesc - * the lockTblDesc to set - */ - public void setLockTblDesc(LockTableDesc lockTblDesc) { - this.lockTblDesc = lockTblDesc; - } - - /** - * @param unlockTblDesc - * the unlockTblDesc 
to set - */ - public void setUnlockTblDesc(UnlockTableDesc unlockTblDesc) { - this.unlockTblDesc = unlockTblDesc; - } - - /** - * @param descFuncDesc - * the showFuncsDesc to set - */ - public void setDescFuncDesc(DescFunctionDesc descFuncDesc) { - descFunctionDesc = descFuncDesc; - } - /** * @return the showPartsDesc */ @@ -949,24 +666,12 @@ public ShowPartitionsDesc getShowPartsDesc() { return showPartsDesc; } - /** - * @param showPartsDesc - * the showPartsDesc to set - */ - public void setShowPartsDesc(ShowPartitionsDesc showPartsDesc) { - this.showPartsDesc = showPartsDesc; - } - @Explain(displayName = "Show Create Database Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public ShowCreateDatabaseDesc getShowCreateDbDesc() { return showCreateDbDesc; } - public void setShowCreateDbDesc(ShowCreateDatabaseDesc showCreateDbDesc) { - this.showCreateDbDesc = showCreateDbDesc; - } - /** * @return the showCreateTblDesc */ @@ -975,14 +680,6 @@ public ShowCreateTableDesc getShowCreateTblDesc() { return showCreateTblDesc; } - /** - * @param showCreateTblDesc - * the showCreateTblDesc to set - */ - public void setShowCreateTblDesc(ShowCreateTableDesc showCreateTblDesc) { - this.showCreateTblDesc = showCreateTblDesc; - } - /** * @return the descTblDesc */ @@ -991,14 +688,6 @@ public DescTableDesc getDescTblDesc() { return descTblDesc; } - /** - * @param descTblDesc - * the descTblDesc to set - */ - public void setDescTblDesc(DescTableDesc descTblDesc) { - this.descTblDesc = descTblDesc; - } - /** * @return information about the partitions we want to add. */ @@ -1007,14 +696,6 @@ public AddPartitionDesc getAddPartitionDesc() { return addPartitionDesc; } - /** - * @param addPartitionDesc - * information about the partitions we want to add. - */ - public void setAddPartitionDesc(AddPartitionDesc addPartitionDesc) { - this.addPartitionDesc = addPartitionDesc; - } - /** * @return information about the partitions we want to rename. */ @@ -1022,14 +703,6 @@ public RenamePartitionDesc getRenamePartitionDesc() { return renamePartitionDesc; } - /** - * @param renamePartitionDesc - * information about the partitions we want to rename. - */ - public void setRenamePartitionDesc(RenamePartitionDesc renamePartitionDesc) { - this.renamePartitionDesc = renamePartitionDesc; - } - /** * @return information about the table/partitions we want to alter. */ @@ -1037,14 +710,6 @@ public AlterTableSimpleDesc getAlterTblSimpleDesc() { return alterTblSimpleDesc; } - /** - * @param desc - * information about the table/partitions we want to alter. 
- */ - public void setAlterTblSimpleDesc(AlterTableSimpleDesc desc) { - this.alterTblSimpleDesc = desc; - } - /** * @return Metastore check description */ @@ -1052,14 +717,6 @@ public MsckDesc getMsckDesc() { return msckDesc; } - /** - * @param msckDesc - * metastore check description - */ - public void setMsckDesc(MsckDesc msckDesc) { - this.msckDesc = msckDesc; - } - /** * @return show table descriptor */ @@ -1067,26 +724,10 @@ public ShowTableStatusDesc getShowTblStatusDesc() { return showTblStatusDesc; } - /** - * @param showTblStatusDesc - * show table descriptor - */ - public void setShowTblStatusDesc(ShowTableStatusDesc showTblStatusDesc) { - this.showTblStatusDesc = showTblStatusDesc; - } - public ShowTblPropertiesDesc getShowTblPropertiesDesc() { return showTblPropertiesDesc; } - public void setShowTblPropertiesDesc(ShowTblPropertiesDesc showTblPropertiesDesc) { - this.showTblPropertiesDesc = showTblPropertiesDesc; - } - - public void setDescFunctionDesc(DescFunctionDesc descFunctionDesc) { - this.descFunctionDesc = descFunctionDesc; - } - public HashSet getInputs() { return inputs; } @@ -1095,14 +736,6 @@ public void setDescFunctionDesc(DescFunctionDesc descFunctionDesc) { return outputs; } - public void setInputs(HashSet inputs) { - this.inputs = inputs; - } - - public void setOutputs(HashSet outputs) { - this.outputs = outputs; - } - /** * @return role ddl desc */ @@ -1110,13 +743,6 @@ public RoleDDLDesc getRoleDDLDesc() { return roleDDLDesc; } - /** - * @param roleDDLDesc role ddl desc - */ - public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) { - this.roleDDLDesc = roleDDLDesc; - } - /** * @return grant desc */ @@ -1124,13 +750,6 @@ public GrantDesc getGrantDesc() { return grantDesc; } - /** - * @param grantDesc grant desc - */ - public void setGrantDesc(GrantDesc grantDesc) { - this.grantDesc = grantDesc; - } - /** * @return show grant desc */ @@ -1138,40 +757,14 @@ public ShowGrantDesc getShowGrantDesc() { return showGrantDesc; } - /** - * @param showGrantDesc - */ - public void setShowGrantDesc(ShowGrantDesc showGrantDesc) { - this.showGrantDesc = showGrantDesc; - } - public RevokeDesc getRevokeDesc() { return revokeDesc; } - public void setRevokeDesc(RevokeDesc revokeDesc) { - this.revokeDesc = revokeDesc; - } - public GrantRevokeRoleDDL getGrantRevokeRoleDDL() { return grantRevokeRoleDDL; } - /** - * @param grantRevokeRoleDDL - */ - public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) { - this.grantRevokeRoleDDL = grantRevokeRoleDDL; - } - - public void setAlterDatabaseDesc(AlterDatabaseDesc alterDbDesc) { - this.alterDbDesc = alterDbDesc; - } - - public AlterDatabaseDesc getAlterDatabaseDesc() { - return this.alterDbDesc; - } - /** * @return descriptor for merging files */ @@ -1179,13 +772,6 @@ public AlterTablePartMergeFilesDesc getMergeFilesDesc() { return mergeFilesDesc; } - /** - * @param mergeDesc descriptor of merging files - */ - public void setMergeFilesDesc(AlterTablePartMergeFilesDesc mergeDesc) { - this.mergeFilesDesc = mergeDesc; - } - public boolean getNeedLock() { return needLock; } @@ -1201,23 +787,11 @@ public AlterTableAlterPartDesc getAlterTableAlterPartDesc() { return alterTableAlterPartDesc; } - /** - * @param alterPartitionDesc - * information about the partitions we want to change. 
- */ - public void setAlterTableAlterPartDesc(AlterTableAlterPartDesc alterPartitionDesc) { - this.alterTableAlterPartDesc = alterPartitionDesc; - } - @Explain(displayName = "Truncate Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public TruncateTableDesc getTruncateTblDesc() { return truncateTblDesc; } - public void setTruncateTblDesc(TruncateTableDesc truncateTblDesc) { - this.truncateTblDesc = truncateTblDesc; - } - /** * @return information about the table partition to be exchanged */ @@ -1232,131 +806,66 @@ public CacheMetadataDesc getCacheMetadataDesc() { return this.cacheMetadataDesc; } - /** - * @param alterTableExchangePartition - * set the value of the table partition to be exchanged - */ - public void setAlterTableExchangePartition( - AlterTableExchangePartition alterTableExchangePartition) { - this.alterTableExchangePartition = alterTableExchangePartition; - } - public ShowConfDesc getShowConfDesc() { return showConfDesc; } - public void setShowConfDesc(ShowConfDesc showConfDesc) { - this.showConfDesc = showConfDesc; - } - @Explain(displayName = "Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public InsertCommitHookDesc getInsertCommitHookDesc() { return insertCommitHookDesc; } - public void setInsertCommitHookDesc(InsertCommitHookDesc insertCommitHookDesc) { - this.insertCommitHookDesc = insertCommitHookDesc; - } - @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public PreInsertTableDesc getPreInsertTableDesc() { return preInsertTableDesc; } - public void setPreInsertTableDesc(PreInsertTableDesc preInsertTableDesc) { - this.preInsertTableDesc = preInsertTableDesc; - } - @Explain(displayName = "Create resource plan") public CreateResourcePlanDesc getCreateResourcePlanDesc() { return createResourcePlanDesc; } - public void setCreateResourcePlanDesc(CreateResourcePlanDesc createResourcePlanDesc) { - this.createResourcePlanDesc = createResourcePlanDesc; - } - @Explain(displayName = "Show resource plan") public ShowResourcePlanDesc getShowResourcePlanDesc() { return showResourcePlanDesc; } - public void setShowResourcePlanDesc(ShowResourcePlanDesc showResourcePlanDesc) { - this.showResourcePlanDesc = showResourcePlanDesc; - } - public DropResourcePlanDesc getDropResourcePlanDesc() { return dropResourcePlanDesc; } - public void setDropResourcePlanDesc(DropResourcePlanDesc dropResourcePlanDesc) { - this.dropResourcePlanDesc = dropResourcePlanDesc; - } - public AlterResourcePlanDesc getAlterResourcePlanDesc() { return alterResourcePlanDesc; } - public void setAlterResourcePlanDesc(AlterResourcePlanDesc alterResourcePlanDesc) { - this.alterResourcePlanDesc = alterResourcePlanDesc; - } - public CreateWMTriggerDesc getCreateWMTriggerDesc() { return createWMTriggerDesc; } - public void setCreateWMTriggerDesc(CreateWMTriggerDesc createWMTriggerDesc) { - this.createWMTriggerDesc = createWMTriggerDesc; - } - public AlterWMTriggerDesc getAlterWMTriggerDesc() { return alterWMTriggerDesc; } - public void setAlterWMTriggerDesc(AlterWMTriggerDesc alterWMTriggerDesc) { - this.alterWMTriggerDesc = alterWMTriggerDesc; - } - public DropWMTriggerDesc getDropWMTriggerDesc() { return dropWMTriggerDesc; } - public void setDropWMTriggerDesc(DropWMTriggerDesc dropWMTriggerDesc) { - this.dropWMTriggerDesc = dropWMTriggerDesc; - } - public CreateOrAlterWMPoolDesc getWmPoolDesc() { return wmPoolDesc; } - public void setWmPoolDesc(CreateOrAlterWMPoolDesc wmPoolDesc) { - 
this.wmPoolDesc = wmPoolDesc; - } - public DropWMPoolDesc getDropWMPoolDesc() { return dropWMPoolDesc; } - public void setDropWMPoolDesc(DropWMPoolDesc dropWMPoolDesc) { - this.dropWMPoolDesc = dropWMPoolDesc; - } - public CreateOrAlterWMMappingDesc getWmMappingDesc() { return wmMappingDesc; } - public void setWmMappingDesc(CreateOrAlterWMMappingDesc wmMappingDesc) { - this.wmMappingDesc = wmMappingDesc; - } - public DropWMMappingDesc getDropWMMappingDesc() { return dropWMMappingDesc; } - public void setDropWMMappingDesc(DropWMMappingDesc dropWMMappingDesc) { - this.dropWMMappingDesc = dropWMMappingDesc; - } - public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() { return triggerToPoolMappingDesc; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TezEdgeProperty.java ql/src/java/org/apache/hadoop/hive/ql/plan/TezEdgeProperty.java index 0abacb3cb8..e6e82613bb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/TezEdgeProperty.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TezEdgeProperty.java @@ -41,6 +41,7 @@ private int minReducer; private int maxReducer; private long inputSizePerReducer; + private Integer bufferSize; public TezEdgeProperty(HiveConf hiveConf, EdgeType edgeType, int buckets) { @@ -105,6 +106,14 @@ public void setSlowStart(boolean slowStart) { this.isSlowStart = slowStart; } + public void setBufferSize(Integer bufferSize) { + this.bufferSize = bufferSize; + } + + public Integer getBufferSize() { + return bufferSize; + } + public void setEdgeType(EdgeType type) { this.edgeType = type; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java index 807eca92f1..2962fa5cff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java @@ -130,7 +130,7 @@ public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo paramInfo) @Override public void doReset(AverageAggregationBuffer aggregation) throws HiveException { aggregation.count = 0; - aggregation.sum = new Double(0); + aggregation.sum = Double.valueOf(0); aggregation.uniqueObjects = new HashSet(); } @@ -225,7 +225,7 @@ protected DoubleWritable getNextResult( throws HiveException { AverageAggregationBuffer myagg = (AverageAggregationBuffer) ss.wrappedBuf; return myagg.count == 0 ? null : new Object[] { - new Double(myagg.sum), myagg.count }; + myagg.sum, myagg.count}; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java index 79bf2be4ec..6ce8734e8f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java @@ -492,7 +492,7 @@ protected Double getCurrentIntermediateResult( org.apache.hadoop.hive.ql.udf.generic.GenericUDAFStreamingEvaluator.SumAvgEnhancer.SumAvgStreamingState ss) throws HiveException { SumDoubleAgg myagg = (SumDoubleAgg) ss.wrappedBuf; - return myagg.empty ? null : new Double(myagg.sum); + return myagg.empty ? null : myagg.sum; } }; @@ -621,7 +621,7 @@ protected Long getCurrentIntermediateResult( org.apache.hadoop.hive.ql.udf.generic.GenericUDAFStreamingEvaluator.SumAvgEnhancer.SumAvgStreamingState ss) throws HiveException { SumLongAgg myagg = (SumLongAgg) ss.wrappedBuf; - return myagg.empty ? null : new Long(myagg.sum); + return myagg.empty ? 
null : myagg.sum; } }; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java index 480ffdef0e..3aa49865f5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java @@ -43,8 +43,9 @@ @Description(name = "str_to_map", value = "_FUNC_(text, delimiter1, delimiter2) - " + "Creates a map by parsing text ", extended = "Split text into key-value pairs" + " using two delimiters. The first delimiter separates pairs, and the" - + " second delimiter sperates key and value. If only one parameter is given, default" - + " delimiters are used: ',' as delimiter1 and ':' as delimiter2.") + + " second delimiter separates key and value. If only one or two parameters are given, default" + + " delimiters are used: ',' as delimiter1 and ':' as delimiter2. Note also that" + + " both delimiter1 and delimiter2 are treated as regular expressions.") public class GenericUDFStringToMap extends GenericUDF { // Must be deterministic order map for consistent q-test output across Java versions - see HIVE-9161 private final LinkedHashMap ret = new LinkedHashMap(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSQLSchema.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSQLSchema.java new file mode 100644 index 0000000000..960ad19ab2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSQLSchema.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
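// Editor's note (illustrative sketch, not part of the patch): the extended str_to_map
// documentation added above now states that both delimiter1 and delimiter2 are treated
// as regular expressions. The hypothetical, JDK-only snippet below shows the practical
// consequence: a regex metacharacter such as '|' has to be escaped ("\\|") when it is
// meant as a literal pair delimiter.
import java.util.LinkedHashMap;
import java.util.Map;

class StrToMapDelimiterSketch {
  // Splits "a:1|b:2" into {a=1, b=2}; an unescaped "|" would match between every character.
  static Map<String, String> toMap(String text) {
    Map<String, String> result = new LinkedHashMap<>();
    for (String pair : text.split("\\|")) {      // pair delimiter, escaped for regex
      String[] kv = pair.split(":", 2);          // key/value delimiter
      result.put(kv[0], kv.length > 1 ? kv[1] : null);
    }
    return result;
  }
}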
+ */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.ParseException; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.udf.UDFType; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector; +import org.apache.hadoop.mapred.JobConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * GenericUDTFGetSQLSchema. + */ +@Description(name = "get_sql_schema", value = "_FUNC_(string) - " + + "Takes query as argument. Returns schema (column names and types) of the resultset " + + " that would be generated when the query is executed. " + + "Can be invoked like: select get_sql_schema(\"select * from some_table\")." + + "NOTE: This does not produce any output for DDL queries like show tables/databases/... and others.") +@UDFType(deterministic = false) +public class GenericUDTFGetSQLSchema extends GenericUDTF { + private static final Logger LOG = LoggerFactory.getLogger(GenericUDTFGetSQLSchema.class); + + protected transient StringObjectInspector stringOI; + protected transient JobConf jc; + + private final transient Object[] nameTypePair = new Object[2]; + + @Override + public void process(Object[] arguments) throws HiveException { + + String query = stringOI.getPrimitiveJavaObject(arguments[0]); + LOG.debug("Getting schema for Query: {}", query); + HiveConf conf = new HiveConf(SessionState.get().getConf()); + List fieldSchemas = null; + try { + fieldSchemas = ParseUtils.parseQueryAndGetSchema(conf, query); + } catch (IOException | ParseException e) { + throw new HiveException(e); + } + + if (fieldSchemas != null) { + for (FieldSchema fieldSchema : fieldSchemas) { + nameTypePair[0] = fieldSchema.getName().getBytes(StandardCharsets.UTF_8); + nameTypePair[1] = fieldSchema.getType().getBytes(StandardCharsets.UTF_8); + forward(nameTypePair); + } + } + } + + @Override + public StructObjectInspector initialize(ObjectInspector[] arguments) + throws UDFArgumentException { + + LOG.debug("initializing GenericUDTFGetSQLSchema"); + + if (SessionState.get() == null || SessionState.get().getConf() == null) { + throw new IllegalStateException("Cannot run GET_SQL_SCHEMA outside HS2"); + } + LOG.debug("Initialized conf, jc and metastore connection"); + + if (arguments.length != 1) { + throw new UDFArgumentLengthException( + "The function GET_SQL_SCHEMA accepts 1 argument."); + } else if (!(arguments[0] instanceof StringObjectInspector)) { + LOG.error("Got " + arguments[0].getTypeName() + " instead of string."); + throw new UDFArgumentTypeException(0, "\"" + + "string\" is expected at function 
GET_SQL_SCHEMA, " + "but \"" + + arguments[0].getTypeName() + "\" is found"); + } + + stringOI = (StringObjectInspector) arguments[0]; + + List names = Arrays.asList("col_name", "col_type"); + List fieldOIs = Arrays.asList(PrimitiveObjectInspectorFactory.javaByteArrayObjectInspector, + PrimitiveObjectInspectorFactory.javaByteArrayObjectInspector); + StructObjectInspector outputOI = ObjectInspectorFactory + .getStandardStructObjectInspector(names, fieldOIs); + + LOG.debug("done initializing GenericUDTFGetSQLSchema"); + return outputOI; + } + + @Override + public String toString() { + return "get_sql_schema"; + } + + @Override + public void close() throws HiveException { + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java index 659512525d..30f06e1bbe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java @@ -102,6 +102,7 @@ public void reset() { ArrayList result = new ArrayList(); ArrayList, Double>> list = new ArrayList(ngrams.entrySet()); Collections.sort(list, new Comparator, Double>>() { + @Override public int compare(Map.Entry, Double> o1, Map.Entry, Double> o2) { int result = o2.getValue().compareTo(o1.getValue()); @@ -155,7 +156,7 @@ public void add(ArrayList ng) throws HiveException { Double curFreq = ngrams.get(ng); if(curFreq == null) { // new n-gram - curFreq = new Double(1.0); + curFreq = Double.valueOf(1.0); } else { // existing n-gram, just increment count curFreq++; @@ -192,6 +193,7 @@ public void add(ArrayList ng) throws HiveException { private void trim(boolean finalTrim) throws HiveException { ArrayList,Double>> list = new ArrayList(ngrams.entrySet()); Collections.sort(list, new Comparator,Double>>() { + @Override public int compare(Map.Entry,Double> o1, Map.Entry,Double> o2) { return o1.getValue().compareTo(o2.getValue()); @@ -246,7 +248,7 @@ public void merge(List other) throws HiveException { double val = Double.parseDouble( other.get(i).toString() ); Double myval = ngrams.get(key); if(myval == null) { - myval = new Double(val); + myval = Double.valueOf(val); } else { myval += val; } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java index 1fa268d7af..d261409efd 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java @@ -474,7 +474,7 @@ public void testVectorizeFilterAndOrExpression() throws HiveException { greaterExprDesc.setChildren(children1); ExprNodeColumnDesc col2Expr = new ExprNodeColumnDesc(Float.class, "col2", "table", false); - ExprNodeConstantDesc const2Desc = new ExprNodeConstantDesc(new Float(1.0)); + ExprNodeConstantDesc const2Desc = new ExprNodeConstantDesc(Float.valueOf(1.0f)); GenericUDFOPLessThan udf2 = new GenericUDFOPLessThan(); ExprNodeGenericFuncDesc lessExprDesc = new ExprNodeGenericFuncDesc(); @@ -535,7 +535,7 @@ public void testVectorizeFilterMultiAndOrExpression() throws HiveException { greaterExprDesc.setChildren(children1); ExprNodeColumnDesc col2Expr = new ExprNodeColumnDesc(Float.class, "col2", "table", false); - ExprNodeConstantDesc const2Desc = new ExprNodeConstantDesc(new Float(1.0)); + ExprNodeConstantDesc const2Desc = new ExprNodeConstantDesc(Float.valueOf(1.0f)); GenericUDFOPLessThan udf2 = new 
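// Editor's note (illustrative, not part of the patch): the GenericUDTFGetSQLSchema file
// added above compiles the query string passed to it via
// ParseUtils.parseQueryAndGetSchema(conf, query) and forwards one (col_name, col_type)
// row per result column, both encoded as UTF-8 bytes. A hypothetical invocation from SQL
// inside HiveServer2, mirroring the @Description text:
//
//   SELECT get_sql_schema("select key, count(*) as cnt from some_table group by key");
//   -- expected rows, assuming some_table.key is a string column: key/string, cnt/bigint
//
// Outside HiveServer2 there is no SessionState, so initialize() deliberately fails with
// IllegalStateException("Cannot run GET_SQL_SCHEMA outside HS2").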
GenericUDFOPLessThan(); ExprNodeGenericFuncDesc lessExprDesc = new ExprNodeGenericFuncDesc(); @@ -1114,7 +1114,7 @@ public void testBetweenFilters() throws HiveException { // string BETWEEN GenericUDFBetween udf = new GenericUDFBetween(); List children1 = new ArrayList(); - children1.add(new ExprNodeConstantDesc(new Boolean(false))); // no NOT keyword + children1.add(new ExprNodeConstantDesc(Boolean.FALSE)); // no NOT keyword children1.add(col1Expr); children1.add(constDesc); children1.add(constDesc2); @@ -1131,7 +1131,7 @@ public void testBetweenFilters() throws HiveException { assertTrue(ve instanceof FilterStringColumnBetween); // string NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); // has NOT keyword + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); // has NOT keyword ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertTrue(ve instanceof FilterStringColumnNotBetween); @@ -1144,7 +1144,7 @@ public void testBetweenFilters() throws HiveException { // CHAR BETWEEN udf = new GenericUDFBetween(); children1 = new ArrayList(); - children1.add(new ExprNodeConstantDesc(new Boolean(false))); // no NOT keyword + children1.add(new ExprNodeConstantDesc(Boolean.FALSE)); // no NOT keyword children1.add(col1Expr); children1.add(constDesc); children1.add(constDesc2); @@ -1156,7 +1156,7 @@ public void testBetweenFilters() throws HiveException { assertTrue(ve instanceof FilterCharColumnBetween); // CHAR NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); // has NOT keyword + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); // has NOT keyword ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertTrue(ve instanceof FilterCharColumnNotBetween); @@ -1169,7 +1169,7 @@ public void testBetweenFilters() throws HiveException { // VARCHAR BETWEEN udf = new GenericUDFBetween(); children1 = new ArrayList(); - children1.add(new ExprNodeConstantDesc(new Boolean(false))); // no NOT keyword + children1.add(new ExprNodeConstantDesc(Boolean.FALSE)); // no NOT keyword children1.add(col1Expr); children1.add(constDesc); children1.add(constDesc2); @@ -1181,12 +1181,12 @@ public void testBetweenFilters() throws HiveException { assertTrue(ve instanceof FilterVarCharColumnBetween); // VARCHAR NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); // has NOT keyword + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); // has NOT keyword ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertTrue(ve instanceof FilterVarCharColumnNotBetween); // long BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(false))); + children1.set(0, new ExprNodeConstantDesc(Boolean.FALSE)); children1.set(1, new ExprNodeColumnDesc(Long.class, "col1", "table", false)); children1.set(2, new ExprNodeConstantDesc(10)); children1.set(3, new ExprNodeConstantDesc(20)); @@ -1194,12 +1194,12 @@ public void testBetweenFilters() throws HiveException { assertTrue(ve instanceof FilterLongColumnBetween); // long NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertTrue(ve instanceof FilterLongColumnNotBetween); // double BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(false))); + children1.set(0, new ExprNodeConstantDesc(Boolean.FALSE)); children1.set(1, new 
ExprNodeColumnDesc(Double.class, "col1", "table", false)); children1.set(2, new ExprNodeConstantDesc(10.0d)); children1.set(3, new ExprNodeConstantDesc(20.0d)); @@ -1207,12 +1207,12 @@ public void testBetweenFilters() throws HiveException { assertTrue(ve instanceof FilterDoubleColumnBetween); // double NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertTrue(ve instanceof FilterDoubleColumnNotBetween); // timestamp BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(false))); + children1.set(0, new ExprNodeConstantDesc(Boolean.FALSE)); children1.set(1, new ExprNodeColumnDesc(Timestamp.class, "col1", "table", false)); children1.set(2, new ExprNodeConstantDesc("2013-11-05 00:00:00.000")); children1.set(3, new ExprNodeConstantDesc("2013-11-06 00:00:00.000")); @@ -1220,7 +1220,7 @@ public void testBetweenFilters() throws HiveException { assertEquals(FilterTimestampColumnBetween.class, ve.getClass()); // timestamp NOT BETWEEN - children1.set(0, new ExprNodeConstantDesc(new Boolean(true))); + children1.set(0, new ExprNodeConstantDesc(Boolean.TRUE)); ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER); assertEquals(FilterTimestampColumnNotBetween.class, ve.getClass()); } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorBetweenIn.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorBetweenIn.java index 16bb445eee..3f09cb1c39 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorBetweenIn.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorBetweenIn.java @@ -464,7 +464,7 @@ private boolean doBetweenInVariation(Random random, String typeName, List children = new ArrayList(); if (isBetween) { - children.add(new ExprNodeConstantDesc(new Boolean(isInvert))); + children.add(new ExprNodeConstantDesc(Boolean.valueOf(isInvert))); } children.add(col1Expr); for (Object compareObject : compareList) { diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 5c13d45d6e..25cd65737b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -2655,7 +2655,7 @@ public void testSplitElimination() throws Exception { SearchArgument sarg = SearchArgumentFactory.newBuilder() .startAnd() - .lessThan("z", PredicateLeaf.Type.LONG, new Long(0)) + .lessThan("z", PredicateLeaf.Type.LONG, Long.valueOf(0)) .end() .build(); conf.set("sarg.pushdown", toKryo(sarg)); diff --git ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRecordReaderWrapper.java ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRecordReaderWrapper.java index c4a4c21cfe..0210a0a372 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRecordReaderWrapper.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestParquetRecordReaderWrapper.java @@ -153,8 +153,8 @@ public void testBuilderFloat() throws Exception { .lessThan("x1", PredicateLeaf.Type.LONG, 22L) .lessThanEquals("y", PredicateLeaf.Type.STRING, new HiveChar("hi", 10).toString()) - .equals("z", PredicateLeaf.Type.FLOAT, new Double(0.22)) - .equals("z1", PredicateLeaf.Type.FLOAT, new Double(0.22)) + .equals("z", PredicateLeaf.Type.FLOAT, 
Double.valueOf(0.22)) + .equals("z1", PredicateLeaf.Type.FLOAT, Double.valueOf(0.22)) .end() .build(); MessageType schema = MessageTypeParser.parseMessageType("message test {" + diff --git ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java index 79a2f4120d..0f402ec29a 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java @@ -412,8 +412,8 @@ public void testBuilderFloat() throws Exception { .lessThan("x1", PredicateLeaf.Type.LONG, 22L) .lessThanEquals("y", PredicateLeaf.Type.STRING, new HiveChar("hi", 10).toString()) - .equals("z", PredicateLeaf.Type.FLOAT, new Double(0.22)) - .equals("z1", PredicateLeaf.Type.FLOAT, new Double(0.22)) + .equals("z", PredicateLeaf.Type.FLOAT, Double.valueOf(0.22)) + .equals("z1", PredicateLeaf.Type.FLOAT, Double.valueOf(0.22)) .end() .build(); assertEquals("leaf-0 = (LESS_THAN x 22), " + diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java index e5098bdc66..11f0876525 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.io.FloatWritable; import org.apache.hadoop.io.IntWritable; @@ -42,6 +41,8 @@ public class TestGenericUDFOPDivide extends AbstractTestGenericUDFOPNumeric { + private static final double EPSILON = 1E-6; + @Test public void testByteDivideShort() throws HiveException { GenericUDFOPDivide udf = new GenericUDFOPDivide(); @@ -82,7 +83,7 @@ public void testVarcharDivideInt() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(123.0 / 456.0), new Double(res.get())); + Assert.assertEquals(123.0 / 456.0, res.get(), EPSILON); } @Test @@ -103,7 +104,7 @@ public void testDoubleDivideLong() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(0.45), new Double(res.get())); + Assert.assertEquals(0.45, res.get(), EPSILON); } @Test @@ -145,7 +146,7 @@ public void testFloatDivideFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(3.0), new Double(res.get())); + Assert.assertEquals(3.0, res.get(), EPSILON); } @Test @@ -166,7 +167,7 @@ public void testDouleDivideDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); 
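// Editor's note (illustrative sketch, not part of the patch): the test changes in this
// region replace boxed comparisons such as assertEquals(new Double(x), new Double(res.get()))
// with JUnit's delta overload assertEquals(double expected, double actual, double delta),
// using a small EPSILON constant. A minimal, self-contained example of why the tolerance
// form is preferred for floating-point results:
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class EpsilonAssertSketch {
  private static final double EPSILON = 1E-6;

  @Test
  public void floatingPointNeedsTolerance() {
    double res = 0.1 + 0.2;            // 0.30000000000000004 under IEEE-754 doubles
    assertEquals(0.3, res, EPSILON);   // passes: difference is far below 1E-6
    // assertEquals(Double.valueOf(0.3), Double.valueOf(res)); // exact box comparison, would fail
  }
}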
Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(74.52 / 234.97), new Double(res.get())); + Assert.assertEquals(74.52 / 234.97, res.get(), EPSILON); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java index 504aa7a078..264daaa2fe 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java @@ -49,6 +49,8 @@ public class TestGenericUDFOPMinus extends AbstractTestGenericUDFOPNumeric { + private static final double EPSILON = 1E-6; + @Test public void testByteMinusShort() throws HiveException { GenericUDFOPMinus udf = new GenericUDFOPMinus(); @@ -89,7 +91,7 @@ public void testVarcharMinusInt() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-333.0), new Double(res.get())); + Assert.assertEquals(-333.0, res.get(), EPSILON); } @Test @@ -111,7 +113,7 @@ public void testDoubleMinusLong() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-5.5), new Double(res.get())); + Assert.assertEquals(-5.5, res.get(), EPSILON); } @Test @@ -153,7 +155,7 @@ public void testFloatMinusFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.floatTypeInfo); FloatWritable res = (FloatWritable) udf.evaluate(args); - Assert.assertEquals(new Float(4.5), new Float(res.get())); + Assert.assertEquals(4.5, res.get(), EPSILON); } @Test @@ -174,7 +176,7 @@ public void testDouleMinusDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-160.45), new Double(res.get())); + Assert.assertEquals(-160.45, res.get(), EPSILON); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java index b945a8529b..0f05f25485 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java @@ -41,6 +41,8 @@ public class TestGenericUDFOPMultiply extends AbstractTestGenericUDFOPNumeric { + private static final double EPSILON = 1E-6; + @Test public void testByteTimesShort() throws HiveException { GenericUDFOPMultiply udf = new GenericUDFOPMultiply(); @@ -81,7 +83,7 @@ public void testVarcharTimesInt() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(123 * 456), new Double(res.get())); + Assert.assertEquals(123 * 
456, res.get(), EPSILON); } @Test @@ -102,7 +104,7 @@ public void testDoubleTimesLong() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(45.0), new Double(res.get())); + Assert.assertEquals(45.0, res.get(), EPSILON); } @Test @@ -144,7 +146,7 @@ public void testFloatTimesFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.floatTypeInfo); FloatWritable res = (FloatWritable) udf.evaluate(args); - Assert.assertEquals(new Float(0.0), new Float(res.get())); + Assert.assertEquals(0.0, res.get(), EPSILON); } @Test @@ -165,7 +167,7 @@ public void testDouleTimesDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(17509.9644), new Double(res.get())); + Assert.assertEquals(17509.9644, res.get(), EPSILON); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNegative.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNegative.java index 7a285c8938..0dc4f9f86d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNegative.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNegative.java @@ -46,6 +46,8 @@ public class TestGenericUDFOPNegative { + private static final double EPSILON = 1E-6; + @Test public void testByte() throws HiveException { GenericUDFOPNegative udf = new GenericUDFOPNegative(); @@ -133,7 +135,7 @@ public void testFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.floatTypeInfo, oi.getTypeInfo()); FloatWritable res = (FloatWritable) udf.evaluate(args); - Assert.assertEquals(new Float(-323.4747f), new Float(res.get())); + Assert.assertEquals(-323.4747f, res.get(), EPSILON); } @Test @@ -151,7 +153,7 @@ public void testDouble() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-32300.004747), new Double(res.get())); + Assert.assertEquals(-32300.004747, res.get(), EPSILON); } @Test @@ -188,7 +190,7 @@ public void testString() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-32300.004747), new Double(res.get())); + Assert.assertEquals(-32300.004747, res.get(), EPSILON); } @Test @@ -208,7 +210,7 @@ public void testVarchar() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-32300.004747), new Double(res.get())); + Assert.assertEquals(-32300.004747, res.get(), EPSILON); } @Test @@ -228,7 +230,7 @@ public 
void testChar() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(-32300.004747), new Double(res.get())); + Assert.assertEquals(-32300.004747, res.get(), EPSILON); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java index 2169999bce..fe7dc7b0f8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java @@ -49,6 +49,8 @@ public class TestGenericUDFOPPlus extends AbstractTestGenericUDFOPNumeric { + private static final double EPSILON = 1E-6; + @Test public void testBytePlusShort() throws HiveException { GenericUDFOPPlus udf = new GenericUDFOPPlus(); @@ -91,7 +93,7 @@ public void testVarcharPlusInt() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(579.0), new Double(res.get())); + Assert.assertEquals(579.0, res.get(), EPSILON); } @Test @@ -113,7 +115,7 @@ public void testDoublePlusLong() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(14.5), new Double(res.get())); + Assert.assertEquals(14.5, res.get(), EPSILON); } @Test @@ -157,7 +159,7 @@ public void testFloatPlusFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.floatTypeInfo); FloatWritable res = (FloatWritable) udf.evaluate(args); - Assert.assertEquals(new Float(4.5), new Float(res.get())); + Assert.assertEquals(4.5, res.get(), EPSILON); } @Test @@ -179,7 +181,7 @@ public void testDoulePlusDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(309.49), new Double(res.get())); + Assert.assertEquals(309.49, res.get(), EPSILON); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPositive.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPositive.java index 6d464da1d4..53bdc47b19 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPositive.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPositive.java @@ -46,6 +46,8 @@ public class TestGenericUDFOPPositive { + private static final double EPSILON = 1E-6; + @Test public void testByte() throws HiveException { GenericUDFOPPositive udf = new GenericUDFOPPositive(); @@ -133,7 +135,7 @@ public void testFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.floatTypeInfo, oi.getTypeInfo()); FloatWritable res = (FloatWritable) udf.evaluate(args); - Assert.assertEquals(new Float(323.4747f), new 
Float(res.get())); + Assert.assertEquals(323.4747, res.get(), EPSILON); } @Test @@ -151,7 +153,7 @@ public void testDouble() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(32300.004747), new Double(res.get())); + Assert.assertEquals(32300.004747, res.get(), EPSILON); } @Test @@ -188,7 +190,7 @@ public void testString() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(32300.004747), new Double(res.get())); + Assert.assertEquals(32300.004747, res.get(), EPSILON); } @Test @@ -208,7 +210,7 @@ public void testVarchar() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(32300.004747), new Double(res.get())); + Assert.assertEquals(32300.004747, res.get(), EPSILON); } @Test @@ -228,7 +230,7 @@ public void testChar() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(32300.004747), new Double(res.get())); + Assert.assertEquals(32300.004747, res.get(), EPSILON); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFPower.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFPower.java index 2812666cee..ba5033fd97 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFPower.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFPower.java @@ -39,6 +39,8 @@ public class TestGenericUDFPower { + private static final double EPSILON = 1E-10; + @Test public void testBytePowerShort() throws HiveException { GenericUDFPower udf = new GenericUDFPower(); @@ -57,7 +59,7 @@ public void testBytePowerShort() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(16), new Double(res.get())); + Assert.assertEquals(16, res.get(), EPSILON); } @Test @@ -79,7 +81,7 @@ public void testVarcharPowerInt() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(3.14 * 3.14), new Double(res.get())); + Assert.assertEquals(3.14 * 3.14, res.get(), EPSILON); } @Test @@ -100,7 +102,7 @@ public void testDoublePowerLong() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(4.5 * 4.5 * 4.5 * 4.5), new Double(res.get())); + Assert.assertEquals(4.5 * 4.5 * 4.5 * 4.5, 
res.get(), EPSILON); } @Test @@ -121,7 +123,7 @@ public void testLongPowerDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(1380.3842646028852), new Double(res.get())); + Assert.assertEquals(1380.3842646028852, res.get(), EPSILON); } @Test @@ -142,7 +144,7 @@ public void testFloatPowerFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(0.10475656017578482), new Double(res.get())); + Assert.assertEquals(0.10475656017578482, res.get(), EPSILON); } @Test @@ -163,7 +165,7 @@ public void testShortPowerFloat() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(oi.getTypeInfo(), TypeInfoFactory.doubleTypeInfo); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(0.009065844089438033), new Double(res.get())); + Assert.assertEquals(0.009065844089438033, res.get(), EPSILON); } @Test @@ -184,7 +186,7 @@ public void testDoulePowerDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals("Unexpected result", new Double(-4.52 * 4.52 * 4.52), new Double(res.get()), 1e-6); + Assert.assertEquals("Unexpected result", -4.52 * 4.52 * 4.52, res.get(), EPSILON); } @Test @@ -205,7 +207,7 @@ public void testDecimalPowerDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo()); DoubleWritable res = (DoubleWritable) udf.evaluate(args); - Assert.assertEquals(new Double(1.9214203800477838E-4), new Double(res.get())); + Assert.assertEquals(1.9214203800477838E-4, res.get(), EPSILON); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java new file mode 100644 index 0000000000..3615d2b778 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.udf.generic; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * TestGenericUDTFGetSQLSchema. + */ +public class TestGenericUDTFGetSQLSchema { + + private static SessionState sessionState; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + HiveConf conf = new HiveConf(); + conf.set("hive.security.authorization.manager", + "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"); + sessionState = SessionState.start(conf); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + SessionState.endStart(sessionState); + } + + @Test + public void testWithComplexTypes() throws Exception { + invokeUDTFAndTest("select array('val1','val2') c1," + + " named_struct('a',1,'b','2') c2, " + + " array(array(1)) c3," + + " array(named_struct('a',1,'b','2')) c4," + + " map(1,1) c5", + new String[]{"c1", "array<string>", + "c2", "struct<a:int,b:string>", + "c3", "array<array<int>>", + "c4", "array<struct<a:int,b:string>>", + "c5", "map<int,int>" + }); + } + + @Test + public void testWithSimpleTypes() throws Exception { + invokeUDTFAndTest("select 1 as c1, 'Happy Valentines Day' as c2, 2.2 as c3, cast(2.2 as float) c4, " + + "cast(2.2 as double) c5, " + + "cast('2019-02-14' as date) c6", + new String[]{"c1", "int", + "c2", "string", + "c3", "decimal(2,1)", + "c4", "float", + "c5", "double", + "c6", "date" + }); + } + + @Test + public void testWithDDL() throws Exception { + invokeUDTFAndTest("show tables", new String[]{}); + } + + private void invokeUDTFAndTest(String query, String[] expected) throws HiveException { + + GenericUDTFGetSQLSchema genericUDTFGetSQLSchema = new GenericUDTFGetSQLSchema(); + List<String> actual = new ArrayList<>(); + genericUDTFGetSQLSchema.collector = input -> { + if (input != null) { + Object[] udfOutput = (Object[]) input; + actual.add(new String((byte[]) udfOutput[0])); + actual.add(new String((byte[]) udfOutput[1])); + } + }; + + genericUDTFGetSQLSchema + .initialize(new ObjectInspector[]{PrimitiveObjectInspectorFactory.javaStringObjectInspector}); + genericUDTFGetSQLSchema.process(new Object[]{query}); + + assertEquals(expected.length, actual.size()); + assertTrue("Failed for query: " + query + ". Expected: " + Arrays.toString(expected) + + ". 
Actual: " + actual, Arrays.equals(expected, actual.toArray())); + } + +} diff --git ql/src/test/queries/clientpositive/udtf_get_sql_schema.q ql/src/test/queries/clientpositive/udtf_get_sql_schema.q new file mode 100644 index 0000000000..b8fadd4bc2 --- /dev/null +++ ql/src/test/queries/clientpositive/udtf_get_sql_schema.q @@ -0,0 +1,17 @@ +set hive.fetch.task.conversion=more; +set hive.mapred.mode=nonstrict; + +describe function get_sql_schema; +describe function extended get_sql_schema; + +create table t1(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array, c7 struct, c8 map); +insert into t1 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1); + +explain select get_sql_schema('select * from t1'); +select get_sql_schema('select * from t1'); + +create external table t2(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array, c7 struct, c8 map); +insert into t2 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1); + +explain select get_sql_schema('select * from t2'); +select get_sql_schema('select * from t2'); diff --git ql/src/test/results/clientnegative/database_create_already_exists.q.out ql/src/test/results/clientnegative/database_create_already_exists.q.out index 16f45ba948..98eeda9bec 100644 --- ql/src/test/results/clientnegative/database_create_already_exists.q.out +++ ql/src/test/results/clientnegative/database_create_already_exists.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:test_db PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Database test_db already exists +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Database test_db already exists diff --git ql/src/test/results/clientnegative/database_create_invalid_name.q.out ql/src/test/results/clientnegative/database_create_invalid_name.q.out index 7a765e1f7a..9a74b0c246 100644 --- ql/src/test/results/clientnegative/database_create_invalid_name.q.out +++ ql/src/test/results/clientnegative/database_create_invalid_name.q.out @@ -6,4 +6,4 @@ default PREHOOK: query: CREATE DATABASE `test.db` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test.db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:test.db is not a valid database name) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:test.db is not a valid database name) diff --git ql/src/test/results/clientnegative/database_drop_not_empty.q.out ql/src/test/results/clientnegative/database_drop_not_empty.q.out index 5758d67472..6b22e5d20b 100644 --- ql/src/test/results/clientnegative/database_drop_not_empty.q.out +++ ql/src/test/results/clientnegative/database_drop_not_empty.q.out @@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE test_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_db PREHOOK: Output: database:test_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.) 
diff --git ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out index 5034977b65..3a712756c3 100644 --- ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out +++ ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out @@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE db_drop_non_empty_restrict PREHOOK: type: DROPDATABASE PREHOOK: Input: database:db_drop_non_empty_restrict PREHOOK: Output: database:db_drop_non_empty_restrict -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.) diff --git ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out index e783251728..e417122929 100644 --- ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out +++ ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out @@ -12,4 +12,4 @@ PREHOOK: query: lock database drop_nodblock shared PREHOOK: type: LOCKDATABASE PREHOOK: Input: database:drop_nodblock PREHOOK: Output: database:drop_nodblock -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out index d7a39f0092..1264e33a6b 100644 --- ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out +++ ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out @@ -12,4 +12,4 @@ PREHOOK: query: unlock database drop_nodbunlock PREHOOK: type: UNLOCKDATABASE PREHOOK: Input: database:drop_nodbunlock PREHOOK: Output: database:drop_nodbunlock -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out index 2c46159bc3..9de0b37f69 100644 --- ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out +++ ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out @@ -59,4 +59,4 @@ PREHOOK: query: unlock database lockneg1 PREHOOK: type: UNLOCKDATABASE PREHOOK: Input: database:lockneg1 PREHOOK: Output: database:lockneg1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Database lockneg1 is not locked +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
Database lockneg1 is not locked diff --git ql/src/test/results/clientnegative/lockneg_try_db_lock_conflict.q.out ql/src/test/results/clientnegative/lockneg_try_db_lock_conflict.q.out index fe12d83e83..0ebdb35055 100644 --- ql/src/test/results/clientnegative/lockneg_try_db_lock_conflict.q.out +++ ql/src/test/results/clientnegative/lockneg_try_db_lock_conflict.q.out @@ -17,4 +17,4 @@ PREHOOK: type: LOCKDATABASE PREHOOK: Input: database:lockneg4 PREHOOK: Output: database:lockneg4 Unable to acquire EXPLICIT, SHARED lock lockneg4 after 1 attempts. -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientnegative/lockneg_try_lock_db_in_use.q.out ql/src/test/results/clientnegative/lockneg_try_lock_db_in_use.q.out index e5c8f3e660..4d69f2805c 100644 --- ql/src/test/results/clientnegative/lockneg_try_lock_db_in_use.q.out +++ ql/src/test/results/clientnegative/lockneg_try_lock_db_in_use.q.out @@ -49,4 +49,4 @@ PREHOOK: type: LOCKDATABASE PREHOOK: Input: database:lockneg2 PREHOOK: Output: database:lockneg2 Unable to acquire EXPLICIT, EXCLUSIVE lock lockneg2 after 1 attempts. -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2 diff --git ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out index b73f3ac484..23dbced668 100644 --- ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out +++ ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out @@ -230,7 +230,7 @@ PREHOOK: query: DROP DATABASE encrypted_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:encrypted_db PREHOOK: Output: database:encrypted_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.) PREHOOK: query: DROP TABLE encrypted_db_outloc.renamed_encrypted_table_n1 PURGE PREHOOK: type: DROPTABLE PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table_n1 diff --git ql/src/test/results/clientpositive/show_functions.q.out ql/src/test/results/clientpositive/show_functions.q.out index 4942fddbe0..dc2b436c38 100644 --- ql/src/test/results/clientpositive/show_functions.q.out +++ ql/src/test/results/clientpositive/show_functions.q.out @@ -111,6 +111,7 @@ from_unixtime from_utc_timestamp get_json_object get_splits +get_sql_schema greatest grouping hash diff --git ql/src/test/results/clientpositive/udtf_get_sql_schema.q.out ql/src/test/results/clientpositive/udtf_get_sql_schema.q.out new file mode 100644 index 0000000000..2faf5aad57 --- /dev/null +++ ql/src/test/results/clientpositive/udtf_get_sql_schema.q.out @@ -0,0 +1,158 @@ +PREHOOK: query: describe function get_sql_schema +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: describe function get_sql_schema +POSTHOOK: type: DESCFUNCTION +get_sql_schema(string) - Takes query as argument. Returns schema (column names and types) of the resultset that would be generated when the query is executed. 
Can be invoked like: select get_sql_schema("select * from some_table").NOTE: This does not produce any output for DDL queries like show tables/databases/... and others. +PREHOOK: query: describe function extended get_sql_schema +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: describe function extended get_sql_schema +POSTHOOK: type: DESCFUNCTION +get_sql_schema(string) - Takes query as argument. Returns schema (column names and types) of the resultset that would be generated when the query is executed. Can be invoked like: select get_sql_schema("select * from some_table").NOTE: This does not produce any output for DDL queries like show tables/databases/... and others. +Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDTFGetSQLSchema +Function type:BUILTIN +PREHOOK: query: create table t1(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array<int>, c7 struct<a:int,b:string>, c8 map<int,int>) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array<int>, c7 struct<a:int,b:string>, c8 map<int,int>) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: insert into t1 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t1 +POSTHOOK: query: insert into t1 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1.c1 SIMPLE [] +POSTHOOK: Lineage: t1.c2 EXPRESSION [] +POSTHOOK: Lineage: t1.c3 EXPRESSION [] +POSTHOOK: Lineage: t1.c4 SIMPLE [] +POSTHOOK: Lineage: t1.c5 EXPRESSION [] +POSTHOOK: Lineage: t1.c6 EXPRESSION [] +POSTHOOK: Lineage: t1.c7 EXPRESSION [] +POSTHOOK: Lineage: t1.c8 EXPRESSION [] +PREHOOK: query: explain select get_sql_schema('select * from t1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: explain select get_sql_schema('select * from t1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'select * from t1' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + function name: get_sql_schema + Select Operator + expressions: col_name (type: binary), col_type (type: binary) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: select get_sql_schema('select * from t1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select get_sql_schema('select * from t1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +t1.c1 int +t1.c2 float +t1.c3 double +t1.c4 string +t1.c5 date +t1.c6 array<int> +t1.c7 struct<a:int,b:string> +t1.c8 map<int,int> +PREHOOK: query: create external 
table t2(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array<int>, c7 struct<a:int,b:string>, c8 map<int,int>) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: create external table t2(c1 int, c2 float, c3 double, c4 string, c5 date, c6 array<int>, c7 struct<a:int,b:string>, c8 map<int,int>) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: insert into t2 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t2 +POSTHOOK: query: insert into t2 select 1, 1.1, 2.2, 'val1', '2019-02-15', array(1), named_struct('a',1,'b','2'), map(1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t2.c1 SIMPLE [] +POSTHOOK: Lineage: t2.c2 EXPRESSION [] +POSTHOOK: Lineage: t2.c3 EXPRESSION [] +POSTHOOK: Lineage: t2.c4 SIMPLE [] +POSTHOOK: Lineage: t2.c5 EXPRESSION [] +POSTHOOK: Lineage: t2.c6 EXPRESSION [] +POSTHOOK: Lineage: t2.c7 EXPRESSION [] +POSTHOOK: Lineage: t2.c8 EXPRESSION [] +PREHOOK: query: explain select get_sql_schema('select * from t2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: explain select get_sql_schema('select * from t2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'select * from t2' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE + function name: get_sql_schema + Select Operator + expressions: col_name (type: binary), col_type (type: binary) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: select get_sql_schema('select * from t2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select get_sql_schema('select * from t2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +t2.c1 int +t2.c2 float +t2.c3 double +t2.c4 string +t2.c5 date +t2.c6 array<int> +t2.c7 struct<a:int,b:string> +t2.c8 map<int,int> diff --git serde/src/java/org/apache/hadoop/hive/serde2/thrift/ColumnBuffer.java serde/src/java/org/apache/hadoop/hive/serde2/thrift/ColumnBuffer.java index efb4508f02..67c8f4547e 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/thrift/ColumnBuffer.java +++ serde/src/java/org/apache/hadoop/hive/serde2/thrift/ColumnBuffer.java @@ -381,7 +381,7 @@ public void addValue(Type type, Object field) { break; case FLOAT_TYPE: nulls.set(size, field == null); - doubleVars()[size] = field == null ? 0 : new Double(field.toString()); + doubleVars()[size] = field == null ? 
0.0 : Double.parseDouble(field.toString()); break; case DOUBLE_TYPE: nulls.set(size, field == null); diff --git serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestUnionStructObjectInspector.java serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestUnionStructObjectInspector.java index efc96931ca..6e0cf479be 100644 --- serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestUnionStructObjectInspector.java +++ serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestUnionStructObjectInspector.java @@ -100,7 +100,7 @@ public void testUnionStructObjectInspector() throws Throwable { struct1.add(true); ArrayList struct2 = new ArrayList(2); struct2.add(1.0); - struct2.add(new Long(111)); + struct2.add(Long.valueOf(111)); ArrayList struct = new ArrayList(2); struct.add(struct1); struct.add(struct2); diff --git service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java index a0cb40457e..9165227ae7 100644 --- service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java +++ service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java @@ -86,7 +86,7 @@ public void apply(DirSearch ldap, String user) throws AuthenticationException { for (String groupDn : memberOf) { String shortName = LdapUtils.getShortName(groupDn); - if (groupFilter.contains(shortName)) { + if (groupFilter.stream().anyMatch(shortName::equalsIgnoreCase)) { LOG.debug("GroupMembershipKeyFilter passes: user '{}' is a member of '{}' group", user, groupDn); LOG.info("Authentication succeeded based on group membership"); diff --git service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java index 36e58c32bd..9ea5361836 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java @@ -105,6 +105,49 @@ public void testGroupMembershipKeyFilterApplyPositive() filter.apply(search, "user3@mydomain.com"); } + @Test + public void testGroupMembershipKeyCaseInsensitiveFilterApplyPositive() + throws AuthenticationException, NamingException, IOException { + conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER, "hiveusers,g1"); + + when(search.findUserDn(eq("user1"))) + .thenReturn("cn=user1,ou=People,dc=example,dc=com"); + when(search.findUserDn(eq("cn=user2,dc=example,dc=com"))) + .thenReturn("cn=user2,ou=People,dc=example,dc=com"); + when(search.findUserDn(eq("user3@mydomain.com"))) + .thenReturn("cn=user3,ou=People,dc=example,dc=com"); + + when(search.findGroupsForUser(eq("cn=user1,ou=People,dc=example,dc=com"))) + .thenReturn(Arrays.asList( + "cn=SuperUsers,ou=Groups,dc=example,dc=com", + "cn=Office1,ou=Groups,dc=example,dc=com", + "cn=HiveUsers,ou=Groups,dc=example,dc=com", + "cn=G1,ou=Groups,dc=example,dc=com")); + when(search.findGroupsForUser(eq("cn=user2,ou=People,dc=example,dc=com"))) + .thenReturn(Arrays.asList( + "cn=HiveUsers,ou=Groups,dc=example,dc=com")); + when(search.findGroupsForUser(eq("cn=user3,ou=People,dc=example,dc=com"))) + .thenReturn(Arrays.asList( + "cn=G1,ou=Groups,dc=example,dc=com", + "cn=G2,ou=Groups,dc=example,dc=com")); + + Filter filter = factory.getInstance(conf); + filter.apply(search, "user1"); + filter.apply(search, "cn=user2,dc=example,dc=com"); + filter.apply(search, "user3@mydomain.com"); + } + + @Test(expected = AuthenticationException.class) + public void 
testGroupMembershipKeyCaseInsensitiveFilterApplyNegative() + throws AuthenticationException, NamingException, IOException { + conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER, "hiveusers,containsg1"); + + when(search.findGroupsForUser(eq("user1"))).thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); + + Filter filter = factory.getInstance(conf); + filter.apply(search, "user1"); + } + @Test(expected = AuthenticationException.class) public void testGroupMembershipKeyFilterApplyNegative() throws AuthenticationException, NamingException, IOException { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java index f6cdc4ce8e..0cf926f5c6 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java @@ -118,6 +118,7 @@ public FMSketch(int numBitVectors) { /** * Resets a distinctValueEstimator object to its original state. */ + @Override public void reset() { for (int i=0; i< numBitVectors; i++) { bitVector[i].clear(); @@ -211,6 +212,7 @@ private int generateHashForPCSA(long v) { return hash; } + @Override public void addToEstimator(long v) { /* Update summary bitVector : * Generate hash value of the long value and mod it by 2^bitVectorSize-1. @@ -251,16 +253,18 @@ public void addToEstimatorPCSA(long v) { bitVector[hash%numBitVectors].set(index); } + @Override public void addToEstimator(double d) { - int v = new Double(d).hashCode(); + int v = Double.hashCode(d); addToEstimator(v); } public void addToEstimatorPCSA(double d) { - int v = new Double(d).hashCode(); + int v = Double.hashCode(d); addToEstimatorPCSA(v); } + @Override public void addToEstimator(HiveDecimal decimal) { int v = decimal.hashCode(); addToEstimator(v); @@ -297,6 +301,7 @@ public long estimateNumDistinctValuesPCSA() { /* We use the Flajolet-Martin estimator to estimate the number of distinct values.FM uses the * location of the least significant zero as an estimate of log2(phi*ndvs). 
*/ + @Override public long estimateNumDistinctValues() { int sumLeastSigZero = 0; double avgLeastSigZero; @@ -333,6 +338,7 @@ static int lengthFor(JavaDataModel model, Integer numVector) { return length; } + @Override public int lengthFor(JavaDataModel model) { return lengthFor(model, getNumBitVectors()); } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index c0ba8673d9..41f399becd 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1597,7 +1598,8 @@ private void drop_database_core(RawStore ms, String catName, ConfVars.BATCH_RETRIEVE_MAX); // First pass will drop the materialized views - List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString()); + List<String> materializedViewNames = getTablesByTypeCore(catName, name, ".*", + TableType.MATERIALIZED_VIEW.toString()); int startIndex = 0; // retrieve the tables from the metastore in batches to alleviate memory constraints while (startIndex < materializedViewNames.size()) { @@ -5265,7 +5266,7 @@ private void alter_table_core(String catName, String dbname, String name, Table try { ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern); ret = FilterUtils.filterTableNamesIfEnabled(isServerFilterEnabled, filterHook, - parsedDbName[CAT_NAME], dbname, ret); + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], ret); } catch (MetaException e) { ex = e; throw e; @@ -5287,7 +5288,9 @@ private void alter_table_core(String catName, String dbname, String name, Table Exception ex = null; String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, TableType.valueOf(tableType)); + ret = getTablesByTypeCore(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, tableType); + ret = FilterUtils.filterTableNamesIfEnabled(isServerFilterEnabled, filterHook, + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], ret); } catch (MetaException e) { ex = e; throw e; @@ -5300,6 +5303,27 @@ private void alter_table_core(String catName, String dbname, String name, Table return ret; } + private List<String> getTablesByTypeCore(final String catName, final String dbname, + final String pattern, final String tableType) throws MetaException { + startFunction("getTablesByTypeCore", ": catName=" + catName + + ": db=" + dbname + " pat=" + pattern + ",type=" + tableType); + + List<String> ret = null; + Exception ex = null; + try { + ret = getMS().getTables(catName, dbname, pattern, TableType.valueOf(tableType)); + } catch (MetaException e) { + ex = e; + throw e; + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("getTablesByTypeCore", ret != null, ex); + } + return ret; + } + @Override public List<String> get_materialized_views_for_rewriting(final String dbname) throws MetaException { @@ -5367,6 +5391,7 @@ private void alter_table_core(String catName, String dbname, String name, Table try { try { tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); + firePreEvent(new PreReadTableEvent(tbl, this)); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } diff --git 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java index ed4a2efb4b..f4c8f65d48 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java @@ -22,7 +22,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper; /** * Defines the method which must be implemented to be used using schema tool to support metastore diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java index 88bd42d1dc..49e19adf71 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java @@ -30,10 +30,10 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; - import com.google.common.collect.ImmutableMap; + +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java index c111343701..536e0c57f7 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java @@ -73,7 +73,7 @@ public static void setPerfLogger(PerfLogger resetPerfLogger) { */ public void PerfLogBegin(String callerName, String method) { long startTime = System.currentTimeMillis(); - startTimes.put(method, new Long(startTime)); + startTimes.put(method, Long.valueOf(startTime)); if (LOG.isDebugEnabled()) { LOG.debug(""); } @@ -98,7 +98,7 @@ public long PerfLogEnd(String callerName, String method) { public long PerfLogEnd(String callerName, String method, String additionalInfo) { Long startTime = startTimes.get(method); long endTime = System.currentTimeMillis(); - endTimes.put(method, new Long(endTime)); + endTimes.put(method, Long.valueOf(endTime)); long duration = startTime == null ? 
-1 : endTime - startTime.longValue(); if (LOG.isDebugEnabled()) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/HiveSchemaHelper.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/HiveSchemaHelper.java index 2da07a5929..caf9c31ef7 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/HiveSchemaHelper.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; @@ -131,9 +131,6 @@ private static void logAndPrintToStdout(String msg) { /** * Find the type of given command - * - * @param dbCommand - * @return */ boolean isPartialCommand(String dbCommand) throws IllegalArgumentException; @@ -141,52 +138,37 @@ private static void logAndPrintToStdout(String msg) { * Parse the DB specific nesting format and extract the inner script name if any * * @param dbCommand command from parent script - * @return * @throws IllegalFormatException */ String getScriptName(String dbCommand) throws IllegalArgumentException; /** * Find if the given command is a nested script execution - * - * @param dbCommand - * @return */ boolean isNestedScript(String dbCommand); /** * Find if the given command should not be passed to DB - * - * @param dbCommand - * @return */ boolean isNonExecCommand(String dbCommand); /** * Get the SQL statement delimiter - * - * @return */ String getDelimiter(); /** * Get the SQL indentifier quotation character - * - * @return */ String getQuoteCharacter(); /** * Clear any client specific tags - * - * @return */ String cleanseCommand(String dbCommand); /** * Does the DB required table/column names quoted - * - * @return */ boolean needsQuotedIdentifier(); @@ -212,10 +194,8 @@ String buildCommand(String scriptDir, String scriptFile, boolean fixQuotes) throws IllegalFormatException, IOException; } - /*** - * Base implementation of NestedScriptParser - * abstractCommandParser. - * + /** + * Base implementation of NestedScriptParser abstractCommandParser. 
*/ private static abstract class AbstractCommandParser implements NestedScriptParser { private List dbOpts; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/MetastoreSchemaTool.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/MetastoreSchemaTool.java index 2fac79fa79..1517fa1f01 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/MetastoreSchemaTool.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.OptionGroup; @@ -29,8 +29,8 @@ import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.NestedScriptParser; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolCommandLine.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolCommandLine.java similarity index 99% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolCommandLine.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolCommandLine.java index d2d0e66541..84b85e97b1 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolCommandLine.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolCommandLine.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -26,7 +26,6 @@ import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; import com.google.common.collect.ImmutableSet; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTask.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTask.java similarity index 95% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTask.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTask.java index 87ef6b5802..31f6f0c328 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTask.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTask.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.hadoop.hive.metastore.HiveMetaException; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskAlterCatalog.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskAlterCatalog.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskAlterCatalog.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskAlterCatalog.java index 4e3b3d1321..e7472d0f3b 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskAlterCatalog.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskAlterCatalog.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateCatalog.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateCatalog.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateCatalog.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateCatalog.java index 785797052f..40d6c5752e 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateCatalog.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateCatalog.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateUser.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateUser.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateUser.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateUser.java index a9ad0058f0..f9b9e5352f 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskCreateUser.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskCreateUser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.slf4j.Logger; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInfo.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInfo.java similarity index 90% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInfo.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInfo.java index cd1d57b8e4..b553b332d6 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInfo.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInfo.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.hadoop.hive.metastore.HiveMetaException; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; /** * Print Hive version and schema version. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInit.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInit.java similarity index 97% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInit.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInit.java index e3fa495e21..7b15e169ad 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInit.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInit.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import java.io.IOException; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInitOrUpgrade.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInitOrUpgrade.java similarity index 97% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInitOrUpgrade.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInitOrUpgrade.java index 365fb36121..6b7fec0a19 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskInitOrUpgrade.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskInitOrUpgrade.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.slf4j.Logger; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveDatabase.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveDatabase.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java index 8a9b9d1a35..7b06f3f718 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveDatabase.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveTable.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveTable.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java index a8f9228ec2..70831b00df 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskMoveTable.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskUpgrade.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskUpgrade.java similarity index 96% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskUpgrade.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskUpgrade.java index 5e71609507..0588342243 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskUpgrade.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskUpgrade.java @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import java.io.File; import java.io.IOException; import java.util.List; import org.apache.hadoop.hive.metastore.HiveMetaException; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; /** * Perform metastore schema upgrade. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskValidate.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskValidate.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskValidate.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskValidate.java index d86b457a27..9bf711eab9 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SchemaToolTaskValidate.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskValidate.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import java.io.BufferedReader; import java.io.File; @@ -43,8 +43,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; -import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.schematool.HiveSchemaHelper.NestedScriptParser; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 49c7d88fcb..23faa7444a 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -356,6 +356,7 @@ protected void testFilterForTables(boolean filterAtServer) throws Exception { } assertEquals(0, client.getTables(DBNAME1, "*").size()); + assertEquals(0, client.getTables(DBNAME1, "*", TableType.MANAGED_TABLE).size()); assertEquals(0, client.getAllTables(DBNAME1).size()); assertEquals(0, client.getTables(DBNAME1, TAB2).size()); } diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java new file mode 100644 index 0000000000..19fd6343ab --- /dev/null +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.events.PreEventContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +import java.util.List; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; + +/** + * Test that HMS server-side pre-event (authorization) listeners are invoked for metastore calls + * such as get_fields. A shared HMS client is created once for the class; each test sets up its + * own databases and tables. + */ +@Category(MetastoreUnitTest.class) +public class TestHmsServerAuthorization { + + /** + * Implementation of MetaStorePreEventListener that throws a MetaException from its onEvent() + * method when so configured.
+ */ + public static class DummyAuthorizationListenerImpl extends MetaStorePreEventListener { + private static volatile boolean throwExceptionAtCall = false; + public DummyAuthorizationListenerImpl(Configuration config) { + super(config); + } + + @Override + public void onEvent(PreEventContext context) + throws MetaException, NoSuchObjectException, InvalidOperationException { + if (throwExceptionAtCall) { + throw new MetaException("Authorization fails"); + } + } + } + + private static HiveMetaStoreClient client; + private static Configuration conf; + + private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100; + + private static String dbName1 = "testdb1"; + private static String dbName2 = "testdb2"; + private static final String TAB1 = "tab1"; + private static final String TAB2 = "tab2"; + + + protected static HiveMetaStoreClient createClient(Configuration metaStoreConf) throws Exception { + try { + return new HiveMetaStoreClient(metaStoreConf); + } catch (Throwable e) { + System.err.println("Unable to open the metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } + + @BeforeClass + public static void setUpForTest() throws Exception { + + // make sure env setup works + TestHmsServerAuthorization.DummyAuthorizationListenerImpl.throwExceptionAtCall = false; + + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetastoreConf.setClass(conf, ConfVars.PRE_EVENT_LISTENERS, DummyAuthorizationListenerImpl.class, + MetaStorePreEventListener.class); + MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true); + conf.set("hive.key1", "value1"); + conf.set("hive.key2", "http://www.example.com"); + conf.set("hive.key3", ""); + conf.set("hive.key4", "0"); + conf.set("datanucleus.autoCreateTables", "false"); + conf.set("hive.in.test", "true"); + + MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2); + MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST); + MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class"); + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false); + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, false); + + MetaStoreTestUtils.setConfForStandloneMode(conf); + + client = createClient(conf); + } + + @AfterClass + public static void tearDown() throws Exception { + if (client != null) { + // make sure tear down works + DummyAuthorizationListenerImpl.throwExceptionAtCall = false; + + client.dropDatabase(dbName1, true, true, true); + client.dropDatabase(dbName2, true, true, true); + client.close(); + } + } + + /** + * This is called in each test after the configuration is set in each test case. 
+ * @throws Exception + */ + protected void createEnv(Configuration conf) throws Exception { + client.dropDatabase(dbName1, true, true, true); + client.dropDatabase(dbName2, true, true, true); + Database db1 = new DatabaseBuilder() + .setName(dbName1) + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(client, conf); + Database db2 = new DatabaseBuilder() + .setName(dbName2) + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(client, conf); + new TableBuilder() + .setDbName(dbName1) + .setTableName(TAB1) + .addCol("id", "int") + .addCol("name", "string") + .create(client, conf); + Table tab2 = new TableBuilder() + .setDbName(dbName1) + .setTableName(TAB2) + .addCol("id", "int") + .addPartCol("name", "string") + .create(client, conf); + new PartitionBuilder() + .inTable(tab2) + .addValue("value1") + .addToTable(client, conf); + new PartitionBuilder() + .inTable(tab2) + .addValue("value2") + .addToTable(client, conf); + } + + /** + * Test that the pre-event listener is invoked by get_fields at the HMS server. + * @throws Exception + */ + @Test + public void testGetFields() throws Exception { + dbName1 = "db_test_get_fields_1"; + dbName2 = "db_test_get_fields_2"; + createEnv(conf); + + // enable throwing exceptions, so we can verify that the pre-event listener is called + TestHmsServerAuthorization.DummyAuthorizationListenerImpl.throwExceptionAtCall = true; + + try { + List<FieldSchema> tableSchema = client.getFields(dbName1, TAB1); + fail("getFields() should fail when the server-side pre-event listener throws an exception"); + } catch (MetaException ex) { + boolean isMessageAuthorization = ex.getMessage().contains("Authorization fails"); + assertEquals(true, isMessageAuthorization); + } + } +} diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java index 2915720f82..6d8fd46f2a 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java @@ -19,7 +19,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.metastore.HiveMetaException; -import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool; +import org.apache.hadoop.hive.metastore.tools.schematool.MetastoreSchemaTool; import org.junit.After; import org.junit.Assert; import org.junit.Before; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestMetastoreSchemaTool.java similarity index 97% rename from standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java rename to standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestMetastoreSchemaTool.java index 23ed3fac95..b4a0844be3 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestMetastoreSchemaTool.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java similarity index 99% rename from standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java rename to standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java index 878759defd..528d7b2589 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.metastore.tools; +package org.apache.hadoop.hive.metastore.tools.schematool; import java.io.BufferedWriter; import java.io.ByteArrayOutputStream; @@ -46,13 +46,9 @@ import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Category(MetastoreCheckinTest.class) public class TestSchemaToolForMetastore { - private static final Logger LOG = LoggerFactory.getLogger(TestMetastoreSchemaTool.class); - private MetastoreSchemaTool schemaTool; private Connection conn; private Configuration conf;