diff --git accumulo-handler/src/test/results/positive/accumulo_queries.q.out accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index d7cceec..de82857 100644
--- accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -40,8 +40,9 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-2 is a root stage
+  Stage-2
+  Stage-1 is a root stage
+  Stage-3 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -52,11 +53,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-2
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -495,9 +500,10 @@ ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-3 is a root stage
-  Stage-2 depends on stages: Stage-3
+  Stage-2
+  Stage-1 is a root stage
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-0
@@ -508,11 +514,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-3
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -547,7 +557,7 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-2
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
diff --git accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
index 7330746..6621a4e 100644
--- accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
+++ accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
@@ -34,15 +34,16 @@ select value,"" where a.key > 50 AND a.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-3 is a root stage
-  Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7
-  Stage-6
-  Stage-2 depends on stages: Stage-6, Stage-5, Stage-8
-  Stage-4 depends on stages: Stage-2
-  Stage-5
+  Stage-2
+  Stage-1 is a root stage
+  Stage-4 is a root stage
+  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8
   Stage-7
-  Stage-8 depends on stages: Stage-7
+  Stage-3 depends on stages: Stage-7, Stage-6, Stage-9
+  Stage-5 depends on stages: Stage-3
+  Stage-6
+  Stage-8
+  Stage-9 depends on stages: Stage-8
 
 STAGE PLANS:
   Stage: Stage-0
@@ -53,11 +54,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-3
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
          TableScan
@@ -94,16 +99,16 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe
                     name: default.src_x2
 
-  Stage: Stage-9
+  Stage: Stage-10
     Conditional Operator
 
-  Stage: Stage-6
+  Stage: Stage-7
     Move Operator
       files:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-2
+  Stage: Stage-3
     Move Operator
       tables:
           replace: true
@@ -113,10 +118,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.src_x1
 
-  Stage: Stage-4
+  Stage: Stage-5
    Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-6
    Map Reduce
      Map Operator Tree:
          TableScan
@@ -128,7 +133,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.src_x1
 
-  Stage: Stage-7
+  Stage: Stage-8
    Map Reduce
      Map Operator Tree:
          TableScan
@@ -140,7 +145,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.src_x1
 
-  Stage: Stage-8
+  Stage: Stage-9
    Move Operator
      files:
          hdfs directory: true
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index 904ac80..cff0056 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -42,6 +42,7 @@
 import io.druid.segment.loading.SegmentLoadingException;
 import io.druid.timeline.DataSegment;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.Constants;
@@ -49,16 +50,21 @@
 import org.apache.hadoop.hive.druid.io.DruidOutputFormat;
 import org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat;
 import org.apache.hadoop.hive.druid.serde.DruidSerDe;
-import org.apache.hadoop.hive.metastore.HiveMetaHookV2;
+import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
@@ -79,7 +85,7 @@
  * DruidStorageHandler provides a HiveStorageHandler implementation for Druid.
  */
 @SuppressWarnings({ "deprecation", "rawtypes" })
-public class DruidStorageHandler extends DefaultStorageHandler implements HiveMetaHookV2 {
+public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStorageHandler {
 
   protected static final Logger LOG = LoggerFactory.getLogger(DruidStorageHandler.class);
 
@@ -99,6 +105,8 @@
 
   private String rootWorkingDir = null;
 
+  private Configuration conf;
+
   public DruidStorageHandler() {
     //this is the default value in druid
     final String base = HiveConf
@@ -178,6 +186,17 @@ public HiveMetaHook getMetaHook() {
   }
 
   @Override
+  public HiveAuthorizationProvider getAuthorizationProvider() throws HiveException {
+    return new DefaultHiveAuthorizationProvider();
+  }
+
+  @Override
+  public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties
+  ) {
+
+  }
+
+  @Override
   public void preCreateTable(Table table) throws MetaException {
     // Do safety checks
     if (MetaStoreUtils.isExternalTable(table) && !StringUtils
@@ -476,7 +495,9 @@ public void commitInsertTable(Table table, boolean overwrite) throws MetaExcepti
 
   @Override
   public void preInsertTable(Table table, boolean overwrite) throws MetaException {
-    //do nothing
+    if (!overwrite) {
+      throw new MetaException("INSERT INTO statement is not allowed by druid storage handler");
+    }
   }
 
   @Override
@@ -492,6 +513,27 @@ public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String
   }
 
   @Override
+  public void configureTableJobProperties(TableDesc tableDesc, Map<String, String> jobProperties
+  ) {
+
+  }
+
+  @Override
+  public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
+
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
   public String toString() {
     return Constants.DRUID_HIVE_STORAGE_HANDLER_ID;
   }
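
A side note on the handler change above (an editor's illustration, not part of the patch): with the new preInsertTable guard, a plain INSERT INTO against a Druid-backed table now fails at the pre-insert stage, while INSERT OVERWRITE proceeds. A minimal, hypothetical sketch of the observable behavior; the class name and the druidTable/handler setup are assumed, not taken from the patch:

    import org.apache.hadoop.hive.druid.DruidStorageHandler;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Illustrative only: exercises the new guard in isolation.
    class PreInsertGuardSketch {
      static void demo(DruidStorageHandler handler, Table druidTable) throws MetaException {
        try {
          handler.preInsertTable(druidTable, false); // plain INSERT INTO
        } catch (MetaException expected) {
          // "INSERT INTO statement is not allowed by druid storage handler"
        }
        handler.preInsertTable(druidTable, true); // INSERT OVERWRITE passes the guard
      }
    }
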
diff --git hbase-handler/src/test/results/positive/hbase_queries.q.out hbase-handler/src/test/results/positive/hbase_queries.q.out
index 1eeaf80..276b6b8 100644
--- hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -40,8 +40,9 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT *
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-2 is a root stage
+  Stage-2
+  Stage-1 is a root stage
+  Stage-3 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -52,11 +53,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-2
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -498,9 +503,10 @@ ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-3 is a root stage
-  Stage-2 depends on stages: Stage-3
+  Stage-2
+  Stage-1 is a root stage
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-0
@@ -511,11 +517,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-3
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -550,7 +560,7 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-2
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
diff --git hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index 079fb0e..68a417d 100644
--- hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
+++ hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
@@ -34,15 +34,16 @@ select value,"" where a.key > 50 AND a.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-3 is a root stage
-  Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7
-  Stage-6
-  Stage-2 depends on stages: Stage-6, Stage-5, Stage-8
-  Stage-4 depends on stages: Stage-2
-  Stage-5
+  Stage-2
+  Stage-1 is a root stage
+  Stage-4 is a root stage
+  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8
   Stage-7
-  Stage-8 depends on stages: Stage-7
+  Stage-3 depends on stages: Stage-7, Stage-6, Stage-9
+  Stage-5 depends on stages: Stage-3
+  Stage-6
+  Stage-8
+  Stage-9 depends on stages: Stage-8
 
 STAGE PLANS:
   Stage: Stage-0
@@ -53,11 +54,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-3
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -94,16 +99,16 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.hbase.HBaseSerDe
                     name: default.src_x2
 
-  Stage: Stage-9
+  Stage: Stage-10
     Conditional Operator
 
-  Stage: Stage-6
+  Stage: Stage-7
     Move Operator
       files:
          hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-2
+  Stage: Stage-3
     Move Operator
       tables:
          replace: true
@@ -113,10 +118,10 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.src_x1
 
-  Stage: Stage-4
+  Stage: Stage-5
     Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-6
     Map Reduce
       Map Operator Tree:
          TableScan
@@ -128,7 +133,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.src_x1
 
-  Stage: Stage-7
+  Stage: Stage-8
     Map Reduce
       Map Operator Tree:
          TableScan
@@ -140,7 +145,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.src_x1
 
-  Stage: Stage-8
+  Stage: Stage-9
     Move Operator
       files:
          hdfs directory: true
diff --git hbase-handler/src/test/results/positive/hbasestats.q.out hbase-handler/src/test/results/positive/hbasestats.q.out
index 4e47bf5..bf902e4 100644
--- hbase-handler/src/test/results/positive/hbasestats.q.out
+++ hbase-handler/src/test/results/positive/hbasestats.q.out
@@ -63,8 +63,9 @@ POSTHOOK: query: explain INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-1
-  Stage-2 is a root stage
+  Stage-2
+  Stage-1 is a root stage
+  Stage-3 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -75,11 +76,15 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE
 
-  Stage: Stage-1
+  Stage: Stage-2
       Insert operator:
         Insert
 
-  Stage: Stage-2
+  Stage: Stage-1
+      Pre Insert operator:
+        Pre-Insert task
+
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java metastore/src/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
new file mode 100644
index 0000000..0957945
--- /dev/null
+++ metastore/src/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+public abstract class DefaultHiveMetaHook implements HiveMetaHook {
+  /**
+   * Called after an INSERT [OVERWRITE] statement has been executed successfully.
+   * @param table table definition
+   * @param overwrite true if it is INSERT OVERWRITE
+   *
+   * @throws MetaException
+   */
+  public abstract void commitInsertTable(Table table, boolean overwrite) throws MetaException;
+
+  /**
+   * Called before the insert is committed, i.e. before commitInsertTable.
+   * @param table table definition
+   * @param overwrite true if it is INSERT OVERWRITE
+   *
+   * @throws MetaException
+   */
+  public abstract void preInsertTable(Table table, boolean overwrite) throws MetaException;
+
+  /**
+   * Called if the pre-insert check or the insert commit fails.
+   * @param table table definition
+   * @param overwrite true if it is INSERT OVERWRITE
+   *
+   * @throws MetaException
+   */
+  public abstract void rollbackInsertTable(Table table, boolean overwrite) throws MetaException;
+}
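
For context (an editor's sketch, not part of the patch): since DefaultHiveMetaHook is an abstract class implementing HiveMetaHook, a storage handler opts into the new insert lifecycle by extending it, as DruidStorageHandler now does. A minimal hypothetical hook might look as follows; the class name and method bodies are illustrative:

    import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Hypothetical example; not part of the patch.
    public class ExampleMetaHook extends DefaultHiveMetaHook {

      @Override
      public void preInsertTable(Table table, boolean overwrite) throws MetaException {
        // Runs in the new Pre-Insert DDL task, before any data is moved.
        // Reject unsupported modes early, as DruidStorageHandler does for INSERT INTO.
        if (!overwrite) {
          throw new MetaException("only INSERT OVERWRITE is supported by this handler");
        }
      }

      @Override
      public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
        // Publish the newly written data; called once the insert has succeeded.
      }

      @Override
      public void rollbackInsertTable(Table table, boolean overwrite) throws MetaException {
        // Clean up partial output; called when the pre-insert or commit step fails.
      }

      // HiveMetaHook's create/drop callbacks must still be implemented.
      @Override public void preCreateTable(Table table) throws MetaException { }
      @Override public void rollbackCreateTable(Table table) throws MetaException { }
      @Override public void commitCreateTable(Table table) throws MetaException { }
      @Override public void preDropTable(Table table) throws MetaException { }
      @Override public void rollbackDropTable(Table table) throws MetaException { }
      @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException { }
    }
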
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookV2.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookV2.java
deleted file mode 100644
index e691c1f..0000000
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookV2.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-public interface HiveMetaHookV2 extends HiveMetaHook {
-  /**
-   * Called after successfully after INSERT [OVERWRITE] statement is executed.
-   * @param table table definition
-   * @param overwrite true if it is INSERT OVERWRITE
-   *
-   * @throws MetaException
-   */
-  public void commitInsertTable(Table table, boolean overwrite) throws MetaException;
-
-  /**
-   * called before commit insert method is called
-   * @param table table definition
-   * @param overwrite true if it is INSERT OVERWRITE
-   *
-   * @throws MetaException
-   */
-  public void preInsertTable(Table table, boolean overwrite) throws MetaException;
-
-  /**
-   * called in case pre commit or commit insert fail.
-   * @param table table definition
-   * @param overwrite true if it is INSERT OVERWRITE
-   *
-   * @throws MetaException
-   */
-  public void rollbackInsertTable(Table table, boolean overwrite) throws MetaException;
-}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index b5d007d..c32104f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2219,14 +2219,15 @@ public void addDynamicPartitions(long txnId, String dbName, String tableName, List<String> partNames)
   public void insertTable(Table table, boolean overwrite) throws MetaException {
     boolean failed = true;
     HiveMetaHook hook = getHook(table);
-    if (hook == null || !(hook instanceof HiveMetaHookV2)) {
+    if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
       return;
     }
-    HiveMetaHookV2 hiveMetaHook = (HiveMetaHookV2) hook;
+    DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
     try {
-      hiveMetaHook.preInsertTable(table, overwrite);
       hiveMetaHook.commitInsertTable(table, overwrite);
-    } finally {
+      failed = false;
+    }
+    finally {
       if (failed) {
         hiveMetaHook.rollbackInsertTable(table, overwrite);
       }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 84ec332..a07c695 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1509,7 +1509,7 @@ void addDynamicPartitions(long txnId, String dbName, String tableName, List<String> partNames)
 
       Task<? extends Serializable>[] tasks = new Task[this.rootTasks.size()];
       tasks = this.rootTasks.toArray(tasks);
-      InsertTableDesc insertTableDesc = new InsertTableDesc(table.getTTable(), overwrite);
+      PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
+      InsertTableDesc insertTableDesc = new InsertTableDesc(table, overwrite);
+      this.rootTasks
+          .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc), conf));
       TaskFactory
           .getAndMakeChild(new DDLWork(getInputs(), getOutputs(), insertTableDesc), conf, tasks);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index c4efb3f..2b9e897 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
+import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
@@ -32,6 +33,7 @@ public class DDLWork implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  private PreInsertTableDesc preInsertTableDesc;
   private InsertTableDesc insertTableDesc;
   private CreateIndexDesc createIndexDesc;
   private AlterIndexDesc alterIndexDesc;
@@ -532,6 +534,12 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
     this.insertTableDesc = insertTableDesc;
   }
 
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      PreInsertTableDesc preInsertTableDesc) {
+    this(inputs, outputs);
+    this.preInsertTableDesc = preInsertTableDesc;
+  }
+
   /**
    * @return Create Database descriptor
    */
@@ -1202,4 +1210,13 @@ public InsertTableDesc getInsertTableDesc() {
   public void setInsertTableDesc(InsertTableDesc insertTableDesc) {
     this.insertTableDesc = insertTableDesc;
   }
+
+  @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public PreInsertTableDesc getPreInsertTableDesc() {
+    return preInsertTableDesc;
+  }
+
+  public void setPreInsertTableDesc(PreInsertTableDesc preInsertTableDesc) {
+    this.preInsertTableDesc = preInsertTableDesc;
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
index 1397b8a..212bc7a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 
 @Explain(displayName = "Insert", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED })
 public class InsertTableDesc extends DDLDesc {
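
Taken together (an editor's summary sketch, not part of the patch): the planner now emits a separate Pre-Insert DDL stage that invokes preInsertTable before any data is moved, while HiveMetaStoreClient.insertTable keeps only the commit/rollback half of the lifecycle. A condensed, illustrative view of the resulting call order; the driver class is hypothetical:

    import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Condensed, illustrative driver showing the post-patch call order.
    final class InsertLifecycleSketch {
      static void run(DefaultHiveMetaHook hook, Table table, boolean overwrite)
          throws MetaException {
        // Stage "Pre Insert operator": planner-generated DDL task, runs first.
        hook.preInsertTable(table, overwrite);

        // ... move/merge tasks write the data ...

        // Stage "Insert operator": mirrors HiveMetaStoreClient.insertTable.
        boolean failed = true;
        try {
          hook.commitInsertTable(table, overwrite);
          failed = false;
        } finally {
          if (failed) {
            hook.rollbackInsertTable(table, overwrite);
          }
        }
      }
    }
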