diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 7014baa13a..288f339e1d 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -186,6 +186,8 @@ static void internalBeforeClassSetup(Map<String, String> additionalProperties, b
     });
 
     MetaStoreTestUtils.startMetaStoreWithRetry(hconf);
+    // re-set the WAREHOUSE property to the test dir, as the previous command added a random port to it
+    hconf.set(MetastoreConf.ConfVars.WAREHOUSE.getVarname(), System.getProperty("test.warehouse.dir", "/tmp"));
 
     Path testPath = new Path(TEST_PATH);
     FileSystem fs = FileSystem.get(testPath.toUri(),hconf);
@@ -3268,7 +3270,7 @@ public void testDumpWithTableDirMissing() throws IOException {
     run("CREATE TABLE " + dbName + ".normal(a int)", driver);
     run("INSERT INTO " + dbName + ".normal values (1)", driver);
 
-    Path path = new Path(System.getProperty("test.warehouse.dir", ""));
+    Path path = new Path(System.getProperty("test.warehouse.dir", "/tmp"));
     path = new Path(path, dbName.toLowerCase() + ".db");
     path = new Path(path, "normal");
     FileSystem fs = path.getFileSystem(hconf);
@@ -3288,7 +3290,7 @@ public void testDumpWithPartitionDirMissing() throws IOException {
     run("CREATE TABLE " + dbName + ".normal(a int) PARTITIONED BY (part int)", driver);
     run("INSERT INTO " + dbName + ".normal partition (part= 124) values (1)", driver);
 
-    Path path = new Path(System.getProperty("test.warehouse.dir",""));
+    Path path = new Path(System.getProperty("test.warehouse.dir","/tmp"));
     path = new Path(path, dbName.toLowerCase()+".db");
     path = new Path(path, "normal");
     path = new Path(path, "part=124");
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
index c3d5f90d3e..c81c5749b6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedMap;
 import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
@@ -84,8 +85,8 @@ public static DataOutputStream getOutputStream(Path outputFile, DDLOperationCont
    * @return {@code true} if item was added
    */
   public static boolean addIfAbsentByName(WriteEntity newWriteEntity, Set<WriteEntity> outputs) {
-    for(WriteEntity writeEntity : outputs) {
-      if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
+    for (WriteEntity writeEntity : outputs) {
+      if (writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
         LOG.debug("Ignoring request to add {} because {} is present", newWriteEntity.toStringDetail(),
             writeEntity.toStringDetail());
         return false;
@@ -153,12 +154,12 @@ public static boolean allowOperationInReplicationScope(Hive db, String tableName
     return false;
   }
 
-  public static String propertiesToString(Map<String, String> props, List<String> exclude) {
+  public static String propertiesToString(Map<String, String> props, Set<String> exclude) {
     if (props.isEmpty()) {
       return "";
     }
 
-    Map<String, String> sortedProperties = new TreeMap<String, String>(props);
+    SortedMap<String, String> sortedProperties = new TreeMap<String, String>(props);
     List<String> realProps = new ArrayList<String>();
     for (Map.Entry<String, String> e : sortedProperties.entrySet()) {
       if (e.getValue() != null && (exclude == null || !exclude.contains(e.getKey()))) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
index a7c6c123be..6eea86b8bf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
@@ -33,23 +33,16 @@
   private static final long serialVersionUID = 1L;
 
   private DDLDesc ddlDesc;
-  boolean needLock = false;
+  private boolean needLock = false;
 
   /** ReadEntitites that are passed to the hooks. */
   private Set<ReadEntity> inputs;
   /** List of WriteEntities that are passed to the hooks. */
   private Set<WriteEntity> outputs;
 
-  public DDLWork() {
-  }
-
-  public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs) {
+  public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc) {
     this.inputs = inputs;
     this.outputs = outputs;
-  }
-
-  public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc) {
-    this(inputs, outputs);
     this.ddlDesc = ddlDesc;
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseSetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseSetPropertiesOperation.java
index 7f73502d2b..12ec9e991e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseSetPropertiesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/AlterDatabaseSetPropertiesOperation.java
@@ -27,7 +27,7 @@
  * Operation process of altering a database's properties.
  */
 public class AlterDatabaseSetPropertiesOperation
-    extends AbstractAlterDatabaseOperation {
+    extends AbstractAlterDatabaseOperation<AlterDatabaseSetPropertiesDesc> {
   public AlterDatabaseSetPropertiesOperation(DDLOperationContext context, AlterDatabaseSetPropertiesDesc desc) {
     super(context, desc);
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java
index a7eabe79e0..bc31974a7f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/CreateDatabaseOperation.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hive.ql.ddl.database;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
@@ -66,7 +66,7 @@ private void makeLocationQualified(Database database) throws HiveException {
       database.setLocationUri(Utilities.getQualifiedPath(context.getConf(), new Path(database.getLocationUri())));
     } else {
       // Location is not set we utilize METASTOREWAREHOUSE together with database name
-      Path path = new Path(HiveConf.getVar(context.getConf(), HiveConf.ConfVars.METASTOREWAREHOUSE),
+      Path path = new Path(MetastoreConf.getVar(context.getConf(), MetastoreConf.ConfVars.WAREHOUSE),
           database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);
       String qualifiedPath = Utilities.getQualifiedPath(context.getConf(), path);
       database.setLocationUri(qualifiedPath);
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/function/ShowFunctionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/function/ShowFunctionsDesc.java
index 08977e1c95..9784b1af40 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/function/ShowFunctionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/function/ShowFunctionsDesc.java
@@ -38,14 +38,10 @@
   private final String pattern;
 
   public ShowFunctionsDesc(Path resFile) {
-    this(resFile, null, false);
+    this(resFile, null);
   }
 
   public ShowFunctionsDesc(Path resFile, String pattern) {
-    this(resFile, pattern, false);
-  }
-
-  public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
     this.resFile = resFile.toString();
     this.pattern = pattern;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java
index ab8cf4699f..e027f4a611 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/MsckOperation.java
@@ -60,8 +60,8 @@ public int execute() throws HiveException, IOException, TException {
     try (HiveMetaStoreClient msc = new HiveMetaStoreClient(context.getConf())) {
       Table table = msc.getTable(SessionState.get().getCurrentCatalog(), names[0], names[1]);
       String qualifiedTableName = Warehouse.getCatalogQualifiedTableName(table);
-      boolean msckEnablePartitionRetention = context.getConf().getBoolean(
-          MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION.getHiveName(), false);
+      boolean msckEnablePartitionRetention = MetastoreConf.getBoolVar(context.getConf(),
+          MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
       if (msckEnablePartitionRetention) {
         partitionExpirySeconds = PartitionManagementTask.getRetentionPeriodInSeconds(table);
         LOG.info("{} - Retention period ({}s) for partition is enabled for MSCK REPAIR..", qualifiedTableName,
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
index 4bf45fcc2a..b04404d76b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
@@ -48,7 +48,7 @@ public int execute() throws HiveException {
     // Write the results into the file
     try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
       // Write a header for cliDriver
-      if(!sessionState.isHiveServerQuery()) {
+      if (!sessionState.isHiveServerQuery()) {
         writeHeader(os);
       }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java
index 805272e4d0..6f28855fd5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java
@@ -48,7 +48,7 @@ public int execute() throws HiveException {
 
     // Write the results into the file
     try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
-      if(!sessionState.isHiveServerQuery()) {
+      if (!sessionState.isHiveServerQuery()) {
         writeHeader(os);
       }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java
index 8c40fab184..e1f9fad454 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java
@@ -124,7 +124,7 @@ protected StorageDescriptor getStorageDescriptor(Table tbl, Partition part) {
     return (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
   }
 
-  public void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
+  private void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
       DDLOperationContext context, EnvironmentContext environmentContext, AbstractAlterTableDesc alterTable)
       throws HiveException {
     if (partitions == null) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java
index f713b22c54..1b6db58141 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java
@@ -247,7 +247,7 @@ public String getTableName() {
     return tableName;
   }
 
-  public String getDatabaseName(){
+  public String getDatabaseName() {
     return databaseName;
   }
 
@@ -606,7 +606,7 @@ public void validate(HiveConf conf)
         } catch (Exception err) {
           LOG.error("Failed to get type info", err);
         }
-        if(null == pti){
+        if (null == pti) {
           throw new SemanticException(ErrorMsg.PARTITION_COLUMN_NON_PRIMITIVE.getMsg() + " Found " + partCol +
               " of type: " + fs.getType());
         }
@@ -711,8 +711,8 @@ public void setReplicationSpec(ReplicationSpec replicationSpec) {
    * @return what kind of replication scope this drop is running under.
    * This can result in a "CREATE/REPLACE IF NEWER THAN" kind of semantic
    */
-  public ReplicationSpec getReplicationSpec(){
-    if (replicationSpec == null){
+  public ReplicationSpec getReplicationSpec() {
+    if (replicationSpec == null) {
       this.replicationSpec = new ReplicationSpec();
     }
     return this.replicationSpec;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java
index e1a1faba17..1410d60017 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java
@@ -68,11 +68,11 @@ public boolean isPurge() {
    * @return what kind of replication scope this drop is running under.
    * This can result in a "DROP IF OLDER THAN" kind of semantic
    */
-  public ReplicationSpec getReplicationSpec(){
+  public ReplicationSpec getReplicationSpec() {
     return replicationSpec;
   }
 
-  public boolean getValidationRequired(){
+  public boolean getValidationRequired() {
     return validationRequired;
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java
index 6a3f00db84..ac54138c58 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java
@@ -27,10 +27,13 @@
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.Path;
@@ -50,20 +53,25 @@
 import org.apache.hive.common.util.HiveStringUtils;
 import org.stringtemplate.v4.ST;
 
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
 /**
  * Operation process showing the creation of a table.
  */
 public class ShowCreateTableOperation extends DDLOperation {
   private static final String EXTERNAL = "external";
   private static final String TEMPORARY = "temporary";
+  private static final String NAME = "name";
   private static final String LIST_COLUMNS = "columns";
-  private static final String TBL_COMMENT = "tbl_comment";
-  private static final String LIST_PARTITIONS = "partitions";
-  private static final String SORT_BUCKET = "sort_bucket";
-  private static final String SKEWED_INFO = "tbl_skewedinfo";
+  private static final String COMMENT = "comment";
+  private static final String PARTITIONS = "partitions";
+  private static final String BUCKETS = "buckets";
+  private static final String SKEWED = "skewedinfo";
   private static final String ROW_FORMAT = "row_format";
-  private static final String TBL_LOCATION = "tbl_location";
-  private static final String TBL_PROPERTIES = "tbl_properties";
+  private static final String LOCATION_BLOCK = "location_block";
+  private static final String LOCATION = "location";
+  private static final String PROPERTIES = "properties";
 
   public ShowCreateTableOperation(DDLOperationContext context, ShowCreateTableDesc desc) {
     super(context, desc);
@@ -73,198 +81,207 @@ public ShowCreateTableOperation(DDLOperationContext context, ShowCreateTableDesc
   public int execute() throws HiveException {
     // get the create table statement for the table and populate the output
     try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
-      return showCreateTable(outStream);
+      Table table = context.getDb().getTable(desc.getTableName(), false);
+      String command = table.isView() ?
+          getCreateViewCommand(table) :
+          getCreateTableCommand(table);
+      outStream.write(command.getBytes(StandardCharsets.UTF_8));
+      return 0;
+    } catch (IOException e) {
+      LOG.info("show create table: ", e);
+      return 1;
     } catch (Exception e) {
       throw new HiveException(e);
     }
   }
 
-  private int showCreateTable(DataOutputStream outStream) throws HiveException {
-    boolean needsLocation = true;
-    StringBuilder createTabCommand = new StringBuilder();
+  private static final String CREATE_VIEW_COMMAND = "CREATE VIEW `%s` AS %s";
 
-    Table tbl = context.getDb().getTable(desc.getTableName(), false);
-    List<String> duplicateProps = new ArrayList<String>();
-    try {
-      needsLocation = CreateTableOperation.doesTableNeedLocation(tbl);
+  private String getCreateViewCommand(Table table) {
+    return String.format(CREATE_VIEW_COMMAND, desc.getTableName(), table.getViewExpandedText());
+  }
 
-      if (tbl.isView()) {
-        String createTabStmt = "CREATE VIEW `" + desc.getTableName() + "` AS " + tbl.getViewExpandedText();
-        outStream.write(createTabStmt.getBytes(StandardCharsets.UTF_8));
-        return 0;
-      }
+  private static final String CREATE_TABLE_TEMPLATE =
+      "CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `<" + NAME + ">`(\n" +
+      "<" + LIST_COLUMNS + ">)\n" +
+      "<" + COMMENT + ">\n" +
+      "<" + PARTITIONS + ">\n" +
+      "<" + BUCKETS + ">\n" +
+      "<" + SKEWED + ">\n" +
+      "<" + ROW_FORMAT + ">\n" +
+      "<" + LOCATION_BLOCK + ">" +
+      "TBLPROPERTIES (\n" +
+      "<" + PROPERTIES + ">)\n";
 
-      createTabCommand.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `");
-      createTabCommand.append(desc.getTableName() + "`(\n");
-      createTabCommand.append("<" + LIST_COLUMNS + ">)\n");
-      createTabCommand.append("<" + TBL_COMMENT + ">\n");
-      createTabCommand.append("<" + LIST_PARTITIONS + ">\n");
-      createTabCommand.append("<" + SORT_BUCKET + ">\n");
-      createTabCommand.append("<" + SKEWED_INFO + ">\n");
">\n"); - if (needsLocation) { - createTabCommand.append("LOCATION\n"); - createTabCommand.append("<" + TBL_LOCATION + ">\n"); - } - createTabCommand.append("TBLPROPERTIES (\n"); - createTabCommand.append("<" + TBL_PROPERTIES + ">)\n"); - ST createTabStmt = new ST(createTabCommand.toString()); - - // For cases where the table is temporary - String tblTemp = ""; - if (tbl.isTemporary()) { - duplicateProps.add("TEMPORARY"); - tblTemp = "TEMPORARY "; - } - // For cases where the table is external - String tblExternal = ""; - if (tbl.getTableType() == TableType.EXTERNAL_TABLE) { - duplicateProps.add("EXTERNAL"); - tblExternal = "EXTERNAL "; - } + private String getCreateTableCommand(Table table) { + ST command = new ST(CREATE_TABLE_TEMPLATE); - // Columns - String tblColumns = ""; - List cols = tbl.getCols(); - List columns = new ArrayList(); - for (FieldSchema col : cols) { - String columnDesc = " `" + col.getName() + "` " + col.getType(); - if (col.getComment() != null) { - columnDesc = columnDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; - } - columns.add(columnDesc); - } - tblColumns = StringUtils.join(columns, ", \n"); - - // Table comment - String tblComment = ""; - String tabComment = tbl.getProperty("comment"); - if (tabComment != null) { - duplicateProps.add("comment"); - tblComment = "COMMENT '" + HiveStringUtils.escapeHiveCommand(tabComment) + "'"; - } + command.add(NAME, desc.getTableName()); + command.add(TEMPORARY, getTemporary(table)); + command.add(EXTERNAL, getExternal(table)); + command.add(LIST_COLUMNS, getColumns(table)); + command.add(COMMENT, getComment(table)); + command.add(PARTITIONS, getPartitions(table)); + command.add(BUCKETS, getBuckets(table)); + command.add(SKEWED, getSkewed(table)); + command.add(ROW_FORMAT, getRowFormat(table)); + command.add(LOCATION_BLOCK, getLocationBlock(table)); + command.add(PROPERTIES, getProperties(table)); + + return command.render(); + } + + private String getTemporary(Table table) { + return table.isTemporary() ? "TEMPORARY " : ""; + } + + private String getExternal(Table table) { + return table.getTableType() == TableType.EXTERNAL_TABLE ? "EXTERNAL " : ""; + } - // Partitions - String tblPartitions = ""; - List partKeys = tbl.getPartitionKeys(); - if (partKeys.size() > 0) { - tblPartitions += "PARTITIONED BY ( \n"; - List partCols = new ArrayList(); - for (FieldSchema partKey : partKeys) { - String partColDesc = " `" + partKey.getName() + "` " + partKey.getType(); - if (partKey.getComment() != null) { - partColDesc = partColDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'"; - } - partCols.add(partColDesc); - } - tblPartitions += StringUtils.join(partCols, ", \n"); - tblPartitions += ")"; + private String getColumns(Table table) { + List columnDescs = new ArrayList(); + for (FieldSchema col : table.getCols()) { + String columnDesc = " `" + col.getName() + "` " + col.getType(); + if (col.getComment() != null) { + columnDesc += " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; } + columnDescs.add(columnDesc); + } + return StringUtils.join(columnDescs, ", \n"); + } + + private String getComment(Table table) { + String comment = table.getProperty("comment"); + return (comment != null) ? 
"COMMENT '" + HiveStringUtils.escapeHiveCommand(comment) + "'" : ""; + } - // Clusters (Buckets) - String tblSortBucket = ""; - List buckCols = tbl.getBucketCols(); - if (buckCols.size() > 0) { - duplicateProps.add("SORTBUCKETCOLSPREFIX"); - tblSortBucket += "CLUSTERED BY ( \n "; - tblSortBucket += StringUtils.join(buckCols, ", \n "); - tblSortBucket += ") \n"; - List sortCols = tbl.getSortCols(); - if (sortCols.size() > 0) { - tblSortBucket += "SORTED BY ( \n"; - // Order - List sortKeys = new ArrayList(); - for (Order sortCol : sortCols) { - String sortKeyDesc = " " + sortCol.getCol() + " " + DirectionUtils.codeToText(sortCol.getOrder()); - sortKeys.add(sortKeyDesc); - } - tblSortBucket += StringUtils.join(sortKeys, ", \n"); - tblSortBucket += ") \n"; - } - tblSortBucket += "INTO " + tbl.getNumBuckets() + " BUCKETS"; + private String getPartitions(Table table) { + List partitionKeys = table.getPartitionKeys(); + if (partitionKeys.isEmpty()) { + return ""; + } + + List partitionDescs = new ArrayList(); + for (FieldSchema partitionKey : partitionKeys) { + String partitionDesc = " `" + partitionKey.getName() + "` " + partitionKey.getType(); + if (partitionKey.getComment() != null) { + partitionDesc += " COMMENT '" + HiveStringUtils.escapeHiveCommand(partitionKey.getComment()) + "'"; } + partitionDescs.add(partitionDesc); + } + return "PARTITIONED BY ( \n" + StringUtils.join(partitionDescs, ", \n") + ")"; + } + + private String getBuckets(Table table) { + List bucketCols = table.getBucketCols(); + if (bucketCols.isEmpty()) { + return ""; + } + + String buckets = "CLUSTERED BY ( \n " + StringUtils.join(bucketCols, ", \n ") + ") \n"; - // Skewed Info - StringBuilder tblSkewedInfo = new StringBuilder(); - SkewedInfo skewedInfo = tbl.getSkewedInfo(); - if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { - tblSkewedInfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n"); - tblSkewedInfo.append(" ON ("); - List colValueList = new ArrayList(); - for (List colValues : skewedInfo.getSkewedColValues()) { - colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); - } - tblSkewedInfo.append(StringUtils.join(colValueList, ",") + ")"); - if (tbl.isStoredAsSubDirectories()) { - tblSkewedInfo.append("\n STORED AS DIRECTORIES"); - } + List sortColumns = table.getSortCols(); + if (!sortColumns.isEmpty()) { + List sortKeys = new ArrayList(); + for (Order sortColumn : sortColumns) { + String sortKeyDesc = " " + sortColumn.getCol() + " " + DirectionUtils.codeToText(sortColumn.getOrder()); + sortKeys.add(sortKeyDesc); } + buckets += "SORTED BY ( \n" + StringUtils.join(sortKeys, ", \n") + ") \n"; + } + + buckets += "INTO " + table.getNumBuckets() + " BUCKETS"; + return buckets; + } + + private String getSkewed(Table table) { + SkewedInfo skewedInfo = table.getSkewedInfo(); + if (skewedInfo == null || skewedInfo.getSkewedColNames().isEmpty()) { + return ""; + } + + List columnValuesList = new ArrayList(); + for (List columnValues : skewedInfo.getSkewedColValues()) { + columnValuesList.add("('" + StringUtils.join(columnValues, "','") + "')"); + } - // Row format (SerDe) - StringBuilder tblRowFormat = new StringBuilder(); - StorageDescriptor sd = tbl.getTTable().getSd(); - SerDeInfo serdeInfo = sd.getSerdeInfo(); - Map serdeParams = serdeInfo.getParameters(); - tblRowFormat.append("ROW FORMAT SERDE \n"); - tblRowFormat.append(" '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n"); - if (tbl.getStorageHandler() == null) { - 
-        // If serialization.format property has the default value, it will not to be included in
-        // SERDE properties
-        if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) {
-          serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
-        }
-        if (!serdeParams.isEmpty()) {
-          appendSerdeParams(tblRowFormat, serdeParams).append(" \n");
-        }
-        tblRowFormat.append("STORED AS INPUTFORMAT \n  '"
-            + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n");
-        tblRowFormat.append("OUTPUTFORMAT \n  '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
-      } else {
-        duplicateProps.add(META_TABLE_STORAGE);
-        tblRowFormat.append("STORED BY \n  '" +
-            HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(META_TABLE_STORAGE)) + "' \n");
-        // SerDe Properties
-        if (!serdeParams.isEmpty()) {
-          appendSerdeParams(tblRowFormat, serdeInfo.getParameters());
-        }
+    String skewed =
+        "SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n" +
+        "  ON (" + StringUtils.join(columnValuesList, ",") + ")";
+    if (table.isStoredAsSubDirectories()) {
+      skewed += "\n  STORED AS DIRECTORIES";
+    }
+    return skewed;
+  }
+
+  private String getRowFormat(Table table) {
+    StringBuilder rowFormat = new StringBuilder();
+
+    StorageDescriptor sd = table.getTTable().getSd();
+    SerDeInfo serdeInfo = sd.getSerdeInfo();
+
+    rowFormat
+      .append("ROW FORMAT SERDE \n")
+      .append("  '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n");
+
+    Map<String, String> serdeParams = serdeInfo.getParameters();
+    if (table.getStorageHandler() == null) {
+      // If serialization.format property has the default value, it will not be included in SERDE properties
+      if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) {
+        serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
+      }
       }
-      String tblLocation = "  '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'";
-
-      // Table properties
-      duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS);
-      String tblProperties = DDLUtils.propertiesToString(tbl.getParameters(), duplicateProps);
-
-      createTabStmt.add(TEMPORARY, tblTemp);
-      createTabStmt.add(EXTERNAL, tblExternal);
-      createTabStmt.add(LIST_COLUMNS, tblColumns);
-      createTabStmt.add(TBL_COMMENT, tblComment);
-      createTabStmt.add(LIST_PARTITIONS, tblPartitions);
-      createTabStmt.add(SORT_BUCKET, tblSortBucket);
-      createTabStmt.add(SKEWED_INFO, tblSkewedInfo);
-      createTabStmt.add(ROW_FORMAT, tblRowFormat);
-      // Table location should not be printed with hbase backed tables
-      if (needsLocation) {
-        createTabStmt.add(TBL_LOCATION, tblLocation);
+      if (!serdeParams.isEmpty()) {
+        appendSerdeParams(rowFormat, serdeParams);
+        rowFormat.append(" \n");
+      }
+      rowFormat
+        .append("STORED AS INPUTFORMAT \n  '" + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n")
+        .append("OUTPUTFORMAT \n  '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
+    } else {
+      String metaTableStorage = table.getParameters().get(META_TABLE_STORAGE);
+      rowFormat.append("STORED BY \n  '" + HiveStringUtils.escapeHiveCommand(metaTableStorage) + "' \n");
+      if (!serdeParams.isEmpty()) {
+        appendSerdeParams(rowFormat, serdeInfo.getParameters());
       }
-      createTabStmt.add(TBL_PROPERTIES, tblProperties);
-
-      outStream.write(createTabStmt.render().getBytes(StandardCharsets.UTF_8));
-    } catch (IOException e) {
-      LOG.info("show create table: ", e);
-      return 1;
     }
-    return 0;
+    return rowFormat.toString();
   }
 
-  public static StringBuilder appendSerdeParams(StringBuilder builder, Map<String, String> serdeParam) {
-    serdeParam = new TreeMap<String, String>(serdeParam);
-    builder.append("WITH SERDEPROPERTIES ( \n");
+  public static void appendSerdeParams(StringBuilder builder, Map<String, String> serdeParams) {
+    SortedMap<String, String> sortedSerdeParams = new TreeMap<String, String>(serdeParams);
     List<String> serdeCols = new ArrayList<String>();
-    for (Entry<String, String> entry : serdeParam.entrySet()) {
+    for (Entry<String, String> entry : sortedSerdeParams.entrySet()) {
       serdeCols.add("  '" + entry.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'");
     }
-    builder.append(StringUtils.join(serdeCols, ", \n")).append(')');
-    return builder;
+
+    builder
+      .append("WITH SERDEPROPERTIES ( \n")
+      .append(StringUtils.join(serdeCols, ", \n"))
+      .append(')');
   }
+
+  private static final String CREATE_TABLE_TEMPLATE_LOCATION =
+      "LOCATION\n" +
+      "<" + LOCATION + ">\n";
+
+  private String getLocationBlock(Table table) {
+    if (!CreateTableOperation.doesTableNeedLocation(table)) {
+      return "";
+    }
+
+    ST locationBlock = new ST(CREATE_TABLE_TEMPLATE_LOCATION);
+    StorageDescriptor sd = table.getTTable().getSd();
+    locationBlock.add(LOCATION, "  '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'");
+    return locationBlock.render();
+  }
+
+  private static final Set<String> PROPERTIES_TO_IGNORE_AT_TBLPROPERTIES = Sets.union(
+      ImmutableSet.of("TEMPORARY", "EXTERNAL", "comment", "SORTBUCKETCOLSPREFIX", META_TABLE_STORAGE),
+      new HashSet<String>(StatsSetupConst.TABLE_PARAMS_STATS_KEYS));
+
+  private String getProperties(Table table) {
+    return DDLUtils.propertiesToString(table.getParameters(), PROPERTIES_TO_IGNORE_AT_TBLPROPERTIES);
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
index d48ae0485b..04d0aa1707 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java
@@ -63,7 +63,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
-import avro.shaded.com.google.common.collect.Lists;
+import com.google.common.collect.Lists;
 
 /**
  * Operation process of dropping a table.
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java
index 2636530434..69414f24e9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java
@@ -185,7 +185,7 @@ private ShowLocksResponse getLocksForNewFormat(HiveLockManager lockMgr) throws H
   public static void dumpLockInfo(DataOutputStream os, ShowLocksResponse response) throws IOException {
     SessionState sessionState = SessionState.get();
     // Write a header for CliDriver
-    if(!sessionState.isHiveServerQuery()) {
+    if (!sessionState.isHiveServerQuery()) {
       os.writeBytes("Lock ID");
       os.write(Utilities.tabCode);
       os.writeBytes("Database");
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
index d25bf3cdee..72db45755a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -70,7 +71,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti
     } else {
       if (!table.getPartitionKeys().isEmpty()) {
         PartitionIterable parts = new PartitionIterable(context.getDb(), table, null,
-            HiveConf.getIntVar(context.getConf(), ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+            MetastoreConf.getIntVar(context.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX));
         for (Partition part : parts) {
           checkMmLb(part);
         }
@@ -112,7 +113,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti
 
     if (!table.getPartitionKeys().isEmpty()) {
       PartitionIterable parts = new PartitionIterable(context.getDb(), table, null,
-          HiveConf.getIntVar(context.getConf(), ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+          MetastoreConf.getIntVar(context.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX));
       for (Partition part : parts) {
         checkMmLb(part);
         Path source = part.getDataLocation();
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java
index bc068ed2ec..9339144408 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java
@@ -211,9 +211,9 @@ public void setReplicationSpec(ReplicationSpec replicationSpec) {
    * @return what kind of replication scope this drop is running under.
    * This can result in a "CREATE/REPLACE IF NEWER THAN" kind of semantic
    */
-  public ReplicationSpec getReplicationSpec(){
-    if (replicationSpec == null){
-      replicationSpec = new ReplicationSpec();
+  public ReplicationSpec getReplicationSpec() {
+    if (replicationSpec == null) {
+      this.replicationSpec = new ReplicationSpec();
     }
     return replicationSpec;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java
index 01e505a73b..990326493e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java
@@ -96,7 +96,7 @@ public boolean getIfPurge() {
    * @return what kind of replication scope this drop is running under.
    * This can result in a "DROP IF OLDER THAN" kind of semantic
    */
-  public ReplicationSpec getReplicationSpec(){
+  public ReplicationSpec getReplicationSpec() {
     return replicationSpec;
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java
index c084662e41..e7cc6d3396 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java
@@ -84,14 +84,14 @@ private void dropPartitionForReplication(Table tbl, ReplicationSpec replicationS
       return;
     }
 
-    for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()){
+    for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
      List<Partition> partitions = new ArrayList<>();
       try {
         context.getDb().getPartitionsByExpr(tbl, partSpec.getPartSpec(), context.getConf(), partitions);
         for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())) {
           context.getDb().dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true);
         }
-      } catch (NoSuchObjectException e){
+      } catch (NoSuchObjectException e) {
         // ignore NSOE because that means there's nothing to drop.
       } catch (Exception e) {
         throw new HiveException(e.getMessage(), e);
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableArchiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableArchiveOperation.java
index 69f5f23cc1..315857bc69 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableArchiveOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableArchiveOperation.java
@@ -130,7 +130,7 @@ private Path getOriginalDir(Table table, PartSpecInfo partitionSpecInfo, List