diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 11d0743..18bf172 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -18,9 +18,6 @@ */ package org.apache.hive.hcatalog.cli.SemanticAnalysis; -import java.io.Serializable; -import java.util.List; - import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -50,6 +47,9 @@ import org.apache.hive.hcatalog.common.ErrorType; import org.apache.hive.hcatalog.common.HCatException; +import java.io.Serializable; +import java.util.List; + public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase { private AbstractSemanticAnalyzerHook hook; @@ -237,7 +237,6 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION: case HiveParser.TOK_ALTERTABLE_SKEWED: case HiveParser.TOK_ALTERTABLE_FILEFORMAT: - case HiveParser.TOK_ALTERTABLE_PROTECTMODE: case HiveParser.TOK_ALTERTABLE_LOCATION: case HiveParser.TOK_ALTERTABLE_MERGEFILES: case HiveParser.TOK_ALTERTABLE_RENAMEPART: diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index 3a69581..41571fc 100644 --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -18,13 +18,6 @@ */ package org.apache.hive.hcatalog.api; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - import com.google.common.base.Function; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -76,6 +69,12 @@ import org.slf4j.LoggerFactory; import javax.annotation.Nullable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; /** * The HCatClientHMSImpl is the Hive Metastore client based implementation of @@ -588,7 +587,6 @@ private void dropPartitionsUsingExpressions(Table table, Map par Utilities.serializeExpressionToKryo(partitionExpression)); hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression), deleteData && !isExternal(table), // Delete data? - false, // Ignore Protection? ifExists, // Fail if table doesn't exist? false); // Need results back? 
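The boolean removed just above was the per-call "ignore protection" argument; after this patch, drop behaviour is expressed only through PartitionDropOptions. A minimal sketch of the resulting caller pattern, assuming an IMetaStoreClient handle named client, already-serialized partition expressions as in the method above, and the PartitionDropOptions-based overload that HiveMetaStoreClient delegates to (see its hunk below) — variable names here are illustrative, not from the patch:

    // Sketch only: these setters are the ones this patch keeps; the removed
    // .ignoreProtection(...) setter no longer exists, so callers simply drop it.
    PartitionDropOptions options = PartitionDropOptions.instance()
        .deleteData(true)       // also remove the data for managed tables
        .ifExists(true)         // do not fail if the partition is already gone
        .returnResults(false);  // skip shipping dropped Partition objects back
    client.dropPartitions(dbName, tableName, partitionExpressions, options);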
} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 0bcd053..1cc99ae 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -225,7 +225,9 @@ import java.util.regex.Pattern; import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.*; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; /** * TODO:pc remove application logic to a separate interface. @@ -2777,10 +2779,9 @@ public DropPartitionsResult drop_partitions_req( } for (Partition part : parts) { - if (!ignoreProtection && !MetaStoreUtils.canDropPartition(tbl, part)) { - throw new MetaException("Table " + tbl.getTableName() - + " Partition " + part + " is protected from being dropped"); - } + + // TODO - we need to speed this up for the normal path where all partitions are under + // the table and we don't have to stat every partition firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); if (colNames != null) { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index a5f5053..66fbfe4 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -842,7 +842,6 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ rps.setExprs(exprs); DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); req.setDeleteData(options.deleteData); - req.setIgnoreProtection(options.ignoreProtection); req.setNeedResult(options.returnResults); req.setIfExists(options.ifExists); if (options.purgeData) { @@ -854,13 +853,12 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ @Override public List dropPartitions(String dbName, String tblName, - List> partExprs, boolean deleteData, boolean ignoreProtection, + List> partExprs, boolean deleteData, boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { return dropPartitions(dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) - .ignoreProtection(ignoreProtection) .ifExists(ifExists) .returnResults(needResult)); @@ -868,13 +866,12 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ @Override public List dropPartitions(String dbName, String tblName, - List> partExprs, boolean deleteData, boolean ignoreProtection, + List> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException { // By default, we need the results from dropPartitions(); return dropPartitions(dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) - .ignoreProtection(ignoreProtection) .ifExists(ifExists)); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 341b0ca..147ffcc 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ 
metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -19,50 +19,30 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventResponse; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.NoSuchLockException; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnOpenException; -import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; -import org.apache.thrift.TException; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FireEventRequest; +import org.apache.hadoop.hive.metastore.api.FireEventResponse; import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.Index; @@ -70,8 +50,15 @@ import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import 
org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionEventType; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; @@ -79,10 +66,20 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.thrift.TException; + +import java.io.IOException; +import java.util.List; +import java.util.Map; /** * Wrapper around hive metastore thrift api @@ -684,11 +681,11 @@ boolean dropPartition(String db_name, String tbl_name, List part_vals, PartitionDropOptions options) throws TException; List dropPartitions(String dbName, String tblName, - List> partExprs, boolean deleteData, boolean ignoreProtection, + List> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException; List dropPartitions(String dbName, String tblName, - List> partExprs, boolean deleteData, boolean ignoreProtection, + List> partExprs, boolean deleteData, boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException; /** diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 38dc406..907cbbf 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1604,32 +1604,6 @@ private static String getPartitionValWithInvalidCharacter(List partVals, return null; } - public static ProtectMode getProtectMode(Partition partition) { - return getProtectMode(partition.getParameters()); - } - - public static ProtectMode getProtectMode(Table table) { - return getProtectMode(table.getParameters()); - } - - private static ProtectMode getProtectMode(Map parameters) { - if (parameters == null) { - return null; - } - - if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) { - return new ProtectMode(); - } else { - return ProtectMode.getProtectModeFromString(parameters.get(ProtectMode.PARAMETER_NAME)); - } - } - - public static boolean canDropPartition(Table table, Partition partition) { - 
ProtectMode mode = getProtectMode(partition); - ProtectMode parentMode = getProtectMode(table); - return (!mode.noDrop && !mode.offline && !mode.readOnly && !parentMode.noDropCascade); - } - public static String ARCHIVING_LEVEL = "archiving_level"; public static int getArchivingLevel(Partition part) throws MetaException { if (!isArchived(part)) { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java index 5b2811f..e8ffbd5 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java @@ -24,7 +24,6 @@ public class PartitionDropOptions { public boolean deleteData = true; - public boolean ignoreProtection = false; public boolean ifExists = false; public boolean returnResults = true; public boolean purgeData = false; @@ -36,11 +35,6 @@ public PartitionDropOptions deleteData(boolean deleteData) { return this; } - public PartitionDropOptions ignoreProtection(boolean ignoreProtection) { - this.ignoreProtection = ignoreProtection; - return this; - } - public PartitionDropOptions ifExists(boolean ifExists) { this.ifExists = ifExists; return this; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java deleted file mode 100644 index b8f1390..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.metastore; - - -public class ProtectMode { - public static String PARAMETER_NAME = "PROTECT_MODE"; - - public static String FLAG_OFFLINE = "OFFLINE"; - public static String FLAG_NO_DROP = "NO_DROP"; - public static String FLAG_NO_DROP_CASCADE = "NO_DROP_CASCADE"; - public static String FLAG_READ_ONLY = "READ_ONLY"; - - public boolean offline = false; - public boolean readOnly = false; - public boolean noDrop = false; - public boolean noDropCascade = false; - - static public ProtectMode getProtectModeFromString(String sourceString) { - return new ProtectMode(sourceString); - } - - private ProtectMode(String sourceString) { - String[] tokens = sourceString.split(","); - for (String token: tokens) { - if (token.equalsIgnoreCase(FLAG_OFFLINE)) { - offline = true; - } else if (token.equalsIgnoreCase(FLAG_NO_DROP)) { - noDrop = true; - } else if (token.equalsIgnoreCase(FLAG_NO_DROP_CASCADE)) { - noDropCascade = true; - } else if (token.equalsIgnoreCase(FLAG_READ_ONLY)) { - readOnly = true; - } - } - } - - public ProtectMode() { - } - - @Override - public String toString() { - String retString = null; - - if (offline) { - retString = FLAG_OFFLINE; - } - - if (noDrop) { - if (retString != null) { - retString = retString + "," + FLAG_NO_DROP; - } - else - { - retString = FLAG_NO_DROP; - } - } - - if (noDropCascade) { - if (retString != null) { - retString = retString + "," + FLAG_NO_DROP_CASCADE; - } - else - { - retString = FLAG_NO_DROP_CASCADE; - } - } - - if (readOnly) { - if (retString != null) { - retString = retString + "," + FLAG_READ_ONLY; - } - else - { - retString = FLAG_READ_ONLY; - } - } - - return retString; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 8bcf860..e32b2a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; -import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; @@ -3345,17 +3344,6 @@ static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean return builder; } - private void setAlterProtectMode(boolean protectModeEnable, - AlterTableDesc.ProtectModeType protectMode, - ProtectMode mode) { - if (protectMode == AlterTableDesc.ProtectModeType.OFFLINE) { - mode.offline = protectModeEnable; - } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP) { - mode.noDrop = protectModeEnable; - } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) { - mode.noDropCascade = protectModeEnable; - } - } /** * Alter a given table. 
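With the ProtectMode class deleted and DDLTask no longer handling ALTERPROTECTMODE, nothing in the paths touched here interprets the PROTECT_MODE table/partition parameter any more, but metadata written by older releases can still carry it. A hedged cleanup sketch — the parameter key and flag format come from the deleted class above, while the table variable (a metastore api Table) and the idea of scrubbing the key are assumptions, not part of this patch:

    // Assumption: params still uses the key/format the deleted ProtectMode class wrote.
    Map<String, String> params = table.getParameters();
    if (params != null && params.containsKey("PROTECT_MODE")) {
      String legacy = params.remove("PROTECT_MODE");  // e.g. "OFFLINE,NO_DROP", ignored after this patch
    }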
* @@ -3588,20 +3576,6 @@ private int alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl, Part if (alterTbl.getSerdeName() != null) { sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName()); } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { - boolean protectModeEnable = alterTbl.isProtectModeEnable(); - AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType(); - - ProtectMode mode = null; - if (part != null) { - mode = part.getProtectMode(); - setAlterProtectMode(protectModeEnable, protectMode, mode); - part.setProtectMode(mode); - } else { - mode = tbl.getProtectMode(); - setAlterProtectMode(protectModeEnable,protectMode, mode); - tbl.setProtectMode(mode); - } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) { StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd()); // validate sort columns and bucket columns @@ -3770,7 +3744,6 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi dropTbl.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) - .ignoreProtection(dropTbl.getIgnoreProtection()) .ifExists(true) .purgeData(dropTbl.getIfPurge())); for (Partition partition : droppedParts) { @@ -3801,11 +3774,6 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc } } - if (tbl != null && !tbl.canDrop()) { - throw new HiveException("Table " + tbl.getTableName() + - " is protected from being dropped"); - } - ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if ((tbl!= null) && replicationSpec.isInReplicationScope()){ /** @@ -3849,24 +3817,6 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc int partitionBatchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX); - // We should check that all the partitions of the table can be dropped - if (tbl != null && tbl.isPartitioned()) { - List partitionNames = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short)-1); - - for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) { - List partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize, - partitionNames.size())); - List listPartitions = db.getPartitionsByNames(tbl, partNames); - for (Partition p: listPartitions) { - if (!p.canDrop()) { - throw new HiveException("Table " + tbl.getTableName() + - " Partition" + p.getName() + - " is protected from being dropped"); - } - } - } - } - // drop the table db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java index 968c1e1..94cb6be 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.hooks; -import java.io.Serializable; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; @@ -29,6 +27,8 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import java.io.Serializable; + /** * This class encapsulates an object that is being written to by the query. This * object may be a table, partition, dfs directory or a local directory. 
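The metastore-side canDropPartition check (HiveMetaStore hunk earlier) and the DDLTask-side canDrop scans removed here were the runtime enforcement points for NO_DROP/OFFLINE. The metastore still fires PreDropPartitionEvent, so a deployment that depended on protection could veto drops from a pre-event listener instead. A sketch under the assumption that the existing listener framework keeps its usual shape — the class, enum, and config key below are from that framework, not from this patch, so treat the exact signatures as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.PreEventContext;

    // Registered via hive.metastore.pre.event.listeners (assumed unchanged by this patch).
    public class NoDropGuard extends MetaStorePreEventListener {
      public NoDropGuard(Configuration conf) { super(conf); }

      @Override
      public void onEvent(PreEventContext context) throws MetaException {
        // Reject every partition drop; a real guard would inspect the event's table first.
        if (context.getEventType() == PreEventContext.PreEventType.DROP_PARTITION) {
          throw new MetaException("partition drops are blocked by NoDropGuard");
        }
      }
    }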
@@ -193,7 +193,6 @@ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTy case REPLACECOLS: case ARCHIVE: case UNARCHIVE: - case ALTERPROTECTMODE: case ALTERPARTITIONPROTECTMODE: case ALTERLOCATION: case DROPPARTITION: diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a6193ba..ba8af8c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -18,30 +18,7 @@ package org.apache.hadoop.hive.ql.metadata; -import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; -import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR; -import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT; -import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - +import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -122,7 +99,29 @@ import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; -import com.google.common.collect.Sets; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; +import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM; +import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR; +import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM; +import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM; +import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM; +import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT; +import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME; /** @@ -1976,19 +1975,17 @@ public boolean dropPartition(String dbName, String tableName, List partV } public List dropPartitions(String tblName, List partSpecs, - boolean deleteData, boolean ignoreProtection, boolean ifExists) throws HiveException { + boolean deleteData, boolean ifExists) throws HiveException { String[] names = Utilities.getDbTableName(tblName); - return dropPartitions( - names[0], names[1], partSpecs, deleteData, ignoreProtection, ifExists); + return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } public List dropPartitions(String dbName, String tblName, - List 
partSpecs, boolean deleteData, boolean ignoreProtection, + List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { return dropPartitions(dbName, tblName, partSpecs, PartitionDropOptions.instance() .deleteData(deleteData) - .ignoreProtection(ignoreProtection) .ifExists(ifExists)); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 08ff2e9..2e77bc4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -541,56 +540,6 @@ public void setValues(Map partSpec) } /** - * @param protectMode - */ - public void setProtectMode(ProtectMode protectMode){ - Map parameters = tPartition.getParameters(); - String pm = protectMode.toString(); - if (pm != null) { - parameters.put(ProtectMode.PARAMETER_NAME, pm); - } else { - parameters.remove(ProtectMode.PARAMETER_NAME); - } - tPartition.setParameters(parameters); - } - - /** - * @return protect mode - */ - public ProtectMode getProtectMode(){ - return MetaStoreUtils.getProtectMode(tPartition); - } - - /** - * @return True protect mode indicates the partition if offline. - */ - public boolean isOffline(){ - ProtectMode pm = getProtectMode(); - if (pm == null) { - return false; - } else { - return pm.offline; - } - } - - /** - * @return True if protect mode attribute of the partition indicate - * that it is OK to drop the table - */ - public boolean canDrop() { - return MetaStoreUtils.canDropPartition(table.getTTable(), tPartition); - } - - /** - * @return True if protect mode attribute of the partition indicate - * that it is OK to write to the table - */ - public boolean canWrite() { - ProtectMode mode = getProtectMode(); - return (!mode.offline && !mode.readOnly); - } - - /** * @return include the db name */ public String getCompleteName() { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index e53933e..52ed4a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -18,15 +18,6 @@ package org.apache.hadoop.hive.ql.metadata; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; - import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,7 +28,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -65,6 +55,15 @@ import org.apache.hadoop.mapred.OutputFormat; import org.apache.hadoop.mapred.SequenceFileInputFormat; +import java.io.Serializable; +import java.util.ArrayList; 
+import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + /** * A Hive Table: is a fundamental unit of data in Hive that shares a common schema/DDL. * @@ -849,52 +848,6 @@ public boolean isNonNative() { } /** - * @param protectMode - */ - public void setProtectMode(ProtectMode protectMode){ - Map parameters = tTable.getParameters(); - String pm = protectMode.toString(); - if (pm != null) { - parameters.put(ProtectMode.PARAMETER_NAME, pm); - } else { - parameters.remove(ProtectMode.PARAMETER_NAME); - } - tTable.setParameters(parameters); - } - - /** - * @return protect mode - */ - public ProtectMode getProtectMode(){ - return MetaStoreUtils.getProtectMode(tTable); - } - - /** - * @return True protect mode indicates the table if offline. - */ - public boolean isOffline(){ - return getProtectMode().offline; - } - - /** - * @return True if protect mode attribute of the partition indicate - * that it is OK to drop the partition - */ - public boolean canDrop() { - ProtectMode mode = getProtectMode(); - return (!mode.noDrop && !mode.offline && !mode.readOnly && !mode.noDropCascade); - } - - /** - * @return True if protect mode attribute of the table indicate - * that it is OK to write the table - */ - public boolean canWrite() { - ProtectMode mode = getProtectMode(); - return (!mode.offline && !mode.readOnly); - } - - /** * @return include the db name */ public String getCompleteName() { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index bc09fc3..a78700d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -18,16 +18,6 @@ package org.apache.hadoop.hive.ql.metadata.formatting; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - import org.apache.commons.lang.StringEscapeUtils; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; @@ -55,6 +45,16 @@ import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.serde2.io.DateWritable; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + /** * This class provides methods to format table and index information. @@ -357,8 +357,6 @@ private static void getTableMetaDataInformation(StringBuilder tableInfo, Table formatOutput("Owner:", tbl.getOwner(), tableInfo); formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo); formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo); - String protectMode = tbl.getProtectMode().toString(); - formatOutput("Protect Mode:", protectMode == null ? 
"None" : protectMode, tableInfo); formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo); if (!tbl.isView()) { formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo); @@ -378,8 +376,6 @@ private static void getPartitionMetaDataInformation(StringBuilder tableInfo, Par formatOutput("CreateTime:", formatDate(part.getTPartition().getCreateTime()), tableInfo); formatOutput("LastAccessTime:", formatDate(part.getTPartition().getLastAccessTime()), tableInfo); - String protectMode = part.getProtectMode().toString(); - formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo); formatOutput("Location:", part.getLocation(), tableInfo); if (part.getTPartition().getParameters().size() > 0) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 24ca663..21625bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -18,28 +18,7 @@ package org.apache.hadoop.hive.ql.parse; -import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION; -import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES; - -import java.io.Serializable; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Properties; -import java.util.Set; - +import com.google.common.collect.Lists; import org.antlr.runtime.tree.CommonTree; import org.antlr.runtime.tree.Tree; import org.apache.commons.logging.Log; @@ -158,7 +137,27 @@ import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.util.StringUtils; -import com.google.common.collect.Lists; +import java.io.Serializable; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; + +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION; +import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES; /** * DDLSemanticAnalyzer. 
@@ -288,8 +287,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeExchangePartition(qualified, ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { analyzeAlterTableFileFormat(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) { - analyzeAlterTableProtectMode(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { analyzeAlterTableLocation(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { @@ -1476,56 +1473,6 @@ private void analyzeAlterTableLocation(ASTNode ast, String tableName, } - private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, - HashMap partSpec) - throws SemanticException { - - AlterTableDesc alterTblDesc = - new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE); - - alterTblDesc.setOldName(tableName); - alterTblDesc.setPartSpec(partSpec); - - ASTNode child = (ASTNode) ast.getChild(0); - - switch (child.getToken().getType()) { - case HiveParser.TOK_ENABLE: - alterTblDesc.setProtectModeEnable(true); - break; - case HiveParser.TOK_DISABLE: - alterTblDesc.setProtectModeEnable(false); - break; - default: - throw new SemanticException( - "Set Protect mode Syntax parsing error."); - } - - ASTNode grandChild = (ASTNode) child.getChild(0); - switch (grandChild.getToken().getType()) { - case HiveParser.TOK_OFFLINE: - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE); - break; - case HiveParser.TOK_NO_DROP: - if (grandChild.getChildCount() > 0) { - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE); - } - else { - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP); - } - break; - case HiveParser.TOK_READONLY: - throw new SemanticException( - "Potect mode READONLY is not implemented"); - default: - throw new SemanticException( - "Only protect mode NO_DROP or OFFLINE supported"); - } - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - private void analyzeAlterTablePartMergeFiles(ASTNode ast, String tableName, HashMap partSpec) throws SemanticException { @@ -2690,11 +2637,10 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean re.noLockNeeded(); inputs.add(re); - boolean ignoreProtection = ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null; - addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection); + addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); DropTableDesc dropTblDesc = - new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection, mustPurge, replicationSpec); + new DropTableDesc(getDotName(qualified), partSpecs, expectView, mustPurge, replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } @@ -3165,9 +3111,8 @@ private void addTablePartsOutputs(Table table, List> partSpe * throwIfNonExistent is true, otherwise ignore it. 
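The analyzeAlterTableProtectMode method removed above handled the ENABLE/DISABLE protect-mode statements; with the corresponding grammar rules also gone (HiveParser.g hunks below), those statements now fail at parse time rather than reaching this analyzer. For reference, the shapes that stop being accepted, matching the alterProtectMode rules removed later in this patch (illustrative strings only):

    String[] noLongerParsed = {
        "ALTER TABLE t ENABLE NO_DROP",
        "ALTER TABLE t ENABLE NO_DROP CASCADE",
        "ALTER TABLE t DISABLE OFFLINE",
        "ALTER TABLE t PARTITION (ds='2015-01-01') ENABLE OFFLINE"
    };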
*/ private void addTableDropPartsOutputs(Table tab, - Collection> partSpecs, boolean throwIfNonExistent, - boolean ignoreProtection) throws SemanticException { - + Collection> partSpecs, + boolean throwIfNonExistent) throws SemanticException { for (List specs : partSpecs) { for (ExprNodeGenericFuncDesc partSpec : specs) { List parts = new ArrayList(); @@ -3193,11 +3138,6 @@ private void addTableDropPartsOutputs(Table tab, } } for (Partition p : parts) { - // TODO: same thing, metastore already checks this but check here if we can. - if (!ignoreProtection && !p.canDrop()) { - throw new SemanticException( - ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName())); - } outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE)); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index a4c5d0e..bdf0ed7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -18,20 +18,6 @@ package org.apache.hadoop.hive.ql.parse; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.StringTokenizer; -import java.util.TreeMap; - import com.google.common.base.Function; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -56,6 +42,18 @@ import org.json.JSONObject; import javax.annotation.Nullable; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; +import java.util.TreeMap; /** * @@ -129,11 +127,6 @@ static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticExceptio } static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws SemanticException { - if (table.isOffline()) { - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table " - + table.getTableName())); - } if (table.isView()) { throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index bdd7cb7..85c0ae6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -110,8 +110,6 @@ KW_AFTER: 'AFTER'; KW_DESCRIBE: 'DESCRIBE'; KW_DROP: 'DROP'; KW_RENAME: 'RENAME'; -KW_IGNORE: 'IGNORE'; -KW_PROTECTION: 'PROTECTION'; KW_TO: 'TO'; KW_COMMENT: 'COMMENT'; KW_BOOLEAN: 'BOOLEAN'; @@ -157,11 +155,8 @@ KW_INPUTFORMAT: 'INPUTFORMAT'; KW_OUTPUTFORMAT: 'OUTPUTFORMAT'; KW_INPUTDRIVER: 'INPUTDRIVER'; KW_OUTPUTDRIVER: 'OUTPUTDRIVER'; -KW_OFFLINE: 'OFFLINE'; KW_ENABLE: 'ENABLE'; KW_DISABLE: 'DISABLE'; -KW_READONLY: 'READONLY'; -KW_NO_DROP: 'NO_DROP'; KW_LOCATION: 'LOCATION'; KW_TABLESAMPLE: 'TABLESAMPLE'; KW_BUCKET: 'BUCKET'; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 15f1f11..3f95bb8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ 
ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -151,7 +151,6 @@ TOK_ALTERTABLE_REPLACECOLS; TOK_ALTERTABLE_ADDPARTS; TOK_ALTERTABLE_DROPPARTS; TOK_ALTERTABLE_PARTCOLTYPE; -TOK_ALTERTABLE_PROTECTMODE; TOK_ALTERTABLE_MERGEFILES; TOK_ALTERTABLE_TOUCH; TOK_ALTERTABLE_ARCHIVE; @@ -330,7 +329,6 @@ TOK_WINDOWDEF; TOK_WINDOWSPEC; TOK_WINDOWVALUES; TOK_WINDOWRANGE; -TOK_IGNOREPROTECTION; TOK_SUBQUERY_EXPR; TOK_SUBQUERY_OP; TOK_SUBQUERY_OP_NOTIN; @@ -809,13 +807,6 @@ orReplace -> ^(TOK_ORREPLACE) ; -ignoreProtection -@init { pushMsg("ignore protection clause", state); } -@after { popMsg(state); } - : KW_IGNORE KW_PROTECTION - -> ^(TOK_IGNOREPROTECTION) - ; - createDatabaseStatement @init { pushMsg("create database statement", state); } @after { popMsg(state); } @@ -1022,7 +1013,6 @@ alterTblPartitionStatementSuffix @after {popMsg(state);} : alterStatementSuffixFileFormat | alterStatementSuffixLocation - | alterStatementSuffixProtectMode | alterStatementSuffixMergeFiles | alterStatementSuffixSerdeProperties | alterStatementSuffixRenamePart @@ -1166,9 +1156,9 @@ partitionLocation alterStatementSuffixDropPartitions[boolean table] @init { pushMsg("drop partition statement", state); } @after { popMsg(state); } - : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? KW_PURGE? replicationClause? - -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? KW_PURGE? replicationClause?) - -> ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? replicationClause?) + : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* KW_PURGE? replicationClause? + -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? KW_PURGE? replicationClause?) + -> ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? replicationClause?) ; alterStatementSuffixProperties @@ -1276,13 +1266,6 @@ alterStatementSuffixExchangePartition -> ^(TOK_ALTERTABLE_EXCHANGEPARTITION partitionSpec $exchangename) ; -alterStatementSuffixProtectMode -@init { pushMsg("alter partition protect mode statement", state); } -@after { popMsg(state); } - : alterProtectMode - -> ^(TOK_ALTERTABLE_PROTECTMODE alterProtectMode) - ; - alterStatementSuffixRenamePart @init { pushMsg("alter table rename partition statement", state); } @after { popMsg(state); } @@ -1304,21 +1287,6 @@ alterStatementSuffixMergeFiles -> ^(TOK_ALTERTABLE_MERGEFILES) ; -alterProtectMode -@init { pushMsg("protect mode specification enable", state); } -@after { popMsg(state); } - : KW_ENABLE alterProtectModeMode -> ^(TOK_ENABLE alterProtectModeMode) - | KW_DISABLE alterProtectModeMode -> ^(TOK_DISABLE alterProtectModeMode) - ; - -alterProtectModeMode -@init { pushMsg("protect mode specification enable", state); } -@after { popMsg(state); } - : KW_OFFLINE -> ^(TOK_OFFLINE) - | KW_NO_DROP KW_CASCADE? -> ^(TOK_NO_DROP KW_CASCADE?) 
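With ignoreProtection removed from alterStatementSuffixDropPartitions, the IGNORE PROTECTION clause disappears from DROP PARTITION while IF EXISTS, PURGE, and the replication clause keep working. A quick sketch of the affected statements (strings for illustration only, not from the patch):

    String stillValid  = "ALTER TABLE t DROP IF EXISTS PARTITION (ds='2015-01-01') PURGE";
    String nowRejected = "ALTER TABLE t DROP PARTITION (ds='2015-01-01') IGNORE PROTECTION";  // fails at parse time after this patch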
- | KW_READONLY -> ^(TOK_READONLY) - ; - alterStatementSuffixBucketNum @init { pushMsg("", state); } @after { popMsg(state); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 187dc20..1f815d0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -206,12 +206,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { // initialize destination table/partition TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree); - if (ts.tableHandle.isOffline()){ - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table " + ts.tableName)); - } - - if (ts.tableHandle.isView()) { + if (ts.tableHandle.isView()) { throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg()); } if (ts.tableHandle.isNonNative()) { @@ -255,10 +250,6 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { try{ Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false); if (part != null) { - if (part.isOffline()) { - throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION. - getMsg(ts.tableName + ":" + part.getName())); - } if (isOverWrite){ outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE)); } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index f41668b..6e353a1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -1599,19 +1599,7 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tab_name); } - // We check offline of the table, as if people only select from an - // non-existing partition of an offline table, the partition won't - // be added to inputs and validate() won't have the information to - // check the table's offline status. - // TODO: Modify the code to remove the checking here and consolidate - // it in validate() - // - if (tab.isOffline()) { - throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION. 
- getMsg("Table " + getUnescapedName(qb.getParseInfo().getSrcForAlias(alias)))); - } - - if (tab.isView()) { + if (tab.isView()) { if (qb.getParseInfo().isAnalyzeCommand()) { throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg()); } @@ -10547,20 +10535,6 @@ public void validate() throws SemanticException { Table tbl = readEntity.getTable(); Partition p = readEntity.getPartition(); - - - if (tbl.isOffline()) { - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg( - "Table " + tbl.getTableName())); - } - - if (type == ReadEntity.Type.PARTITION && p != null && p.isOffline()) { - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg( - "Table " + tbl.getTableName() + - " Partition " + p.getName())); - } } for (WriteEntity writeEntity : getOutputs()) { @@ -10614,25 +10588,11 @@ public void validate() throws SemanticException { } catch (HiveException e) { throw new SemanticException(e); } - - if (type == WriteEntity.Type.PARTITION && p != null && p.isOffline()) { - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg( - " Table " + tbl.getTableName() + - " Partition " + p.getName())); - } - } else { LOG.debug("Not a partition."); tbl = writeEntity.getTable(); } - - if (tbl.isOffline()) { - throw new SemanticException( - ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg( - "Table " + tbl.getTableName())); - } } boolean reworkMapredWork = HiveConf.getBoolVar(this.conf, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 97d02ea..2fdf1e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -18,13 +18,13 @@ package org.apache.hadoop.hive.ql.parse; -import java.util.HashMap; - import org.antlr.runtime.tree.Tree; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; +import java.util.HashMap; + /** * SemanticAnalyzerFactory. * @@ -114,10 +114,6 @@ } static { - tablePartitionCommandType.put( - HiveParser.TOK_ALTERTABLE_PROTECTMODE, - new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, - HiveOperation.ALTERPARTITION_PROTECTMODE }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT, HiveOperation.ALTERPARTITION_FILEFORMAT }); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 9e1ac80..577af8b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -18,14 +18,6 @@ package org.apache.hadoop.hive.ql.plan; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -34,6 +26,14 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain.Level; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + /** * AlterTableDesc. 
* @@ -51,7 +51,7 @@ ADDPROPS("add props"), DROPPROPS("drop props"), ADDSERDE("add serde"), ADDSERDEPROPS("add serde props"), ADDFILEFORMAT("add fileformat"), ADDCLUSTERSORTCOLUMN("add cluster sort column"), RENAMECOLUMN("rename column"), ADDPARTITION("add partition"), TOUCH("touch"), ARCHIVE("archieve"), - UNARCHIVE("unarchieve"), ALTERPROTECTMODE("alter protect mode"), + UNARCHIVE("unarchieve"), ALTERPARTITIONPROTECTMODE("alter partition protect mode"), ALTERLOCATION("alter location"), DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), ADDSKEWEDBY("add skew column"), ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"), @@ -72,7 +72,6 @@ new HashSet(); static { - alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE); alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDCOLS); alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.REPLACECOLS); alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.RENAMECOLUMN); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java index bb0e7f7..62c8f7e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hive.ql.plan; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + import java.io.Serializable; import java.util.ArrayList; import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; -import org.apache.hadoop.hive.ql.plan.Explain.Level; /** * DropTableDesc. @@ -55,7 +56,6 @@ public int getPrefixLength() { boolean expectView; boolean ifExists; boolean ifPurge; - boolean ignoreProtection; ReplicationSpec replicationSpec; public DropTableDesc() { @@ -73,13 +73,11 @@ public DropTableDesc( this.expectView = expectView; this.ifExists = ifExists; this.ifPurge = ifPurge; - this.ignoreProtection = false; this.replicationSpec = replicationSpec; } public DropTableDesc(String tableName, Map> partSpecs, - boolean expectView, boolean ignoreProtection, boolean ifPurge, - ReplicationSpec replicationSpec) { + boolean expectView, boolean ifPurge, ReplicationSpec replicationSpec) { this.tableName = tableName; this.partSpecs = new ArrayList(partSpecs.size()); for (Map.Entry> partSpec : partSpecs.entrySet()) { @@ -88,7 +86,6 @@ public DropTableDesc(String tableName, Map