diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
index 899ccce..46ebcdc 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
@@ -53,7 +53,6 @@
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
@@ -433,4 +432,22 @@ public DecomposedPredicate decomposePredicate(
     decomposedPredicate.residualPredicate = residualPredicate;
     return decomposedPredicate;
   }
+
+  @Override
+  public void truncateTable(Table table) throws MetaException {
+    String tableName = getHBaseTableName(table);
+    try {
+      HBaseAdmin admin = getHBaseAdmin();
+      HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(tableName));
+      if (admin.tableExists(tableName)) {
+        if (admin.isTableEnabled(tableName)) {
+          admin.disableTable(tableName);
+        }
+        admin.deleteTable(tableName);
+        admin.createTable(tableDesc);
+      }
+    } catch (IOException ie) {
+      throw new MetaException(StringUtils.stringifyException(ie));
+    }
+  }
 }
diff --git hbase-handler/src/test/queries/positive/hbase_queries.q hbase-handler/src/test/queries/positive/hbase_queries.q
index 9ea9b1a..17da0a4 100644
--- hbase-handler/src/test/queries/positive/hbase_queries.q
+++ hbase-handler/src/test/queries/positive/hbase_queries.q
@@ -148,6 +148,9 @@ FROM src WHERE key=98 OR key=100;
 
 SELECT * FROM hbase_table_8 ORDER BY key;
 
+truncate table hbase_table_8;
+SELECT * FROM hbase_table_8;
+
 DROP TABLE hbase_table_1;
 DROP TABLE hbase_table_2;
 DROP TABLE hbase_table_3;
diff --git hbase-handler/src/test/results/positive/hbase_queries.q.out hbase-handler/src/test/results/positive/hbase_queries.q.out
index 2768755..164a090 100644
--- hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -886,6 +886,20 @@ POSTHOOK: Input: default@hbase_table_8
 #### A masked pattern was here ####
 98 val_98 99 100
 100 val_100 101 102
+PREHOOK: query: truncate table hbase_table_8
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@hbase_table_8
+POSTHOOK: query: truncate table hbase_table_8
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@hbase_table_8
+PREHOOK: query: SELECT * FROM hbase_table_8
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hbase_table_8
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM hbase_table_8
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hbase_table_8
+#### A masked pattern was here ####
 PREHOOK: query: DROP TABLE hbase_table_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@hbase_table_1
diff --git hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
index 6dfa506..b071351 100644
--- hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
+++ hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
@@ -22,8 +22,8 @@
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 import org.apache.hadoop.mapred.OutputFormat;
@@ -33,7 +33,7 @@
  * the storage handlers required for non-native tables in HCatalog.
  * @deprecated Use/modify {@link org.apache.hadoop.hive.ql.metadata.HiveStorageHandler} instead
  */
-public abstract class HCatStorageHandler implements HiveStorageHandler {
+public abstract class HCatStorageHandler extends DefaultStorageHandler {
 
     //TODO move this to HiveStorageHandler
 
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 570b358..9e7e125 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 
 /**
@@ -89,4 +88,12 @@ public void rollbackDropTable(Table table)
    */
  public void commitDropTable(Table table, boolean deleteData)
    throws MetaException;
+
+  /**
+   * Called when a table is truncated.
+   *
+   * @param table table definition
+   * @throws MetaException
+   */
+  public void truncateTable(Table table) throws MetaException;
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 92ed55b..653a89e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -4043,6 +4044,18 @@ private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws H
 
     Table table = db.getTable(tableName, true);
 
+    if (table.isNonNative()) {
+      HiveMetaHook metaHook = table.getStorageHandler().getMetaHook();
+      try {
+        metaHook.truncateTable(table.getTTable());
+      } catch (MetaException e) {
+        throw new HiveException(e.toString(), e);
+      } catch (UnsupportedOperationException e) {
+        throw new HiveException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName));
+      }
+      return 0;
+    }
+
     try {
       // this is not transactional
       for (Path location : getLocations(db, table, partSpec)) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
index 83772bd..e0e3555 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
@@ -22,6 +22,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -101,4 +102,8 @@ public Configuration getConf() {
   public void setConf(Configuration conf) {
     this.conf = conf;
   }
+
+  public void truncateTable(org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
+    throw new UnsupportedOperationException("truncateTable");
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 0e2d555..3c00c59 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -714,8 +714,8 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
     if (table.getTableType() != TableType.MANAGED_TABLE) {
       throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName));
     }
-    if (table.isNonNative()) {
-      throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO
+    if (table.isNonNative() && table.getStorageHandler().getMetaHook() == null) {
+      throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName));
     }
     if (!table.isPartitioned() && root.getChildCount() > 1) {
       throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName));
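
Reviewer note (not part of the patch): a minimal sketch of how another non-native storage handler could opt in to the new HiveMetaHook.truncateTable(Table) contract introduced above. The class name ExampleStorageHandler, the my.handler.data.dir table property, and the file-backed layout are invented for illustration; only the hook signature, the DefaultStorageHandler fallback (UnsupportedOperationException), and DDLTask's dispatch come from the diff.

package org.example.hive;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.util.StringUtils;

public class ExampleStorageHandler extends DefaultStorageHandler implements HiveMetaHook {

  @Override
  public HiveMetaHook getMetaHook() {
    // Expose this handler as its own meta hook (as HBaseStorageHandler does),
    // so DDLTask can reach truncateTable() when TRUNCATE TABLE is issued.
    return this;
  }

  @Override
  public void truncateTable(Table table) throws MetaException {
    // Remove the externally managed data. Without this override, the
    // DefaultStorageHandler fallback throws UnsupportedOperationException,
    // which DDLTask converts into TRUNCATE_FOR_NON_NATIVE_TABLE.
    String dataDir = table.getParameters().get("my.handler.data.dir"); // hypothetical property
    if (dataDir == null) {
      return;
    }
    try {
      Path dir = new Path(dataDir);
      FileSystem fs = dir.getFileSystem(getConf());
      if (fs.exists(dir)) {
        fs.delete(dir, true);
        fs.mkdirs(dir);
      }
    } catch (IOException e) {
      throw new MetaException(StringUtils.stringifyException(e));
    }
  }

  // Remaining HiveMetaHook methods are no-ops in this sketch.
  @Override public void preCreateTable(Table table) throws MetaException {}
  @Override public void rollbackCreateTable(Table table) throws MetaException {}
  @Override public void commitCreateTable(Table table) throws MetaException {}
  @Override public void preDropTable(Table table) throws MetaException {}
  @Override public void rollbackDropTable(Table table) throws MetaException {}
  @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException {}
}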