diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsDesc.java new file mode 100644 index 0000000000..68a83fc6f3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsDesc.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.process; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ABORT TRANSACTIONS commands. + */ +@Explain(displayName = "Abort Transaction", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AbortTransactionsDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AbortTransactionsDesc.class, AbortTransactionsOperation.class); + } + + private final List<Long> transactionIds; + + public AbortTransactionsDesc(List<Long> transactionIds) { + this.transactionIds = transactionIds; + } + + @Explain(displayName = "Transaction IDs", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List<Long> getTransactionIds() { + return transactionIds; + } +}
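An editorial aside on the pattern this new descriptor introduces: each Desc class self-registers its operation class in a static initializer, so DDLTask2 can later look the operation up by descriptor type instead of switching on it. The patch does not show DDLTask2 itself; the sketch below is only a plausible shape for such a registry, and all names in it (OperationRegistry, OPERATIONS, operationFor) are invented for illustration.

    // Hypothetical registry sketch -- not the actual DDLTask2 code.
    import java.lang.reflect.Constructor;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class OperationRegistry {
      private static final Map<Class<?>, Class<?>> OPERATIONS = new ConcurrentHashMap<>();

      static void registerOperation(Class<?> descClass, Class<?> operationClass) {
        OPERATIONS.put(descClass, operationClass);
      }

      // Presumably how a task would turn a desc into its operation:
      static Object operationFor(Object context, Object desc) throws Exception {
        Class<?> operationClass = OPERATIONS.get(desc.getClass());
        Constructor<?> ctor = operationClass.getConstructors()[0]; // (context, desc) constructor
        return ctor.newInstance(context, desc);
      }
    }

One caveat of this pattern: the static block only runs once the Desc class is loaded, so the dispatcher has to make sure descriptor classes are touched before lookup.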
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsOperation.java new file mode 100644 index 0000000000..251390581e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/AbortTransactionsOperation.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.process; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of aborting transactions. + */ +public class AbortTransactionsOperation extends DDLOperation { + private final AbortTransactionsDesc desc; + + public AbortTransactionsOperation(DDLOperationContext context, AbortTransactionsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + context.getDb().abortTransactions(desc.getTransactionIds()); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/KillQueryDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesDesc.java similarity index 60% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/KillQueryDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesDesc.java index 3a13064411..0779e9e686 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/KillQueryDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesDesc.java @@ -15,31 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.process; + import java.io.Serializable; import java.util.List; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + /** - * Descriptor for killing queries. + * DDL task description for KILL QUERY commands. */ -@Explain(displayName = "Kill Query", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED }) -public class KillQueryDesc extends DDLDesc implements Serializable { +@Explain(displayName = "Kill Query", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class KillQueriesDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private List<String> queryIds; - - public KillQueryDesc() { + + static { + DDLTask2.registerOperation(KillQueriesDesc.class, KillQueriesOperation.class); } - public KillQueryDesc(List<String> queryIds) { + private List<String> queryIds; + + public KillQueriesDesc(List<String> queryIds) { this.queryIds = queryIds; } - @Explain(displayName = "Query IDs", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED }) + @Explain(displayName = "Query IDs", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public List<String> getQueryIds() { return queryIds; } - - public void setQueryIds(List<String> queryIds) { - this.queryIds = queryIds; - } }
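Worth noting in the rename above: the no-arg constructor and setter existed only so the old plan class could be serialized; the new descriptor is immutable and is built once by the analyzer with the parsed query ids. A hedged illustration of what a caller would now do (the analyzer plumbing is omitted and the query ids are made up):

    // Illustrative construction of the immutable descriptor, e.g. for a
    // statement such as KILL QUERY 'hive_20190601_0001' -- ids invented.
    import java.util.Arrays;
    import java.util.List;

    class KillQueriesDescUsage {
      public static void main(String[] args) {
        List<String> queryIds = Arrays.asList("hive_20190601_0001", "hive_20190601_0002");
        // new KillQueriesDesc(queryIds) would be wrapped into a DDL task here;
        // loading KillQueriesDesc also runs its static registration block.
        System.out.println("queries to kill: " + queryIds);
      }
    }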
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesOperation.java new file mode 100644 index 0000000000..74d37ece28 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/KillQueriesOperation.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.process; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of killing queries. + */ +public class KillQueriesOperation extends DDLOperation { + private final KillQueriesDesc desc; + + public KillQueriesOperation(DDLOperationContext context, KillQueriesDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + SessionState sessionState = SessionState.get(); + for (String queryId : desc.getQueryIds()) { + sessionState.getKillQuery().killQuery(queryId, "User invoked KILL QUERY", context.getDb().getConf()); + } + LOG.info("kill query called ({})", desc.getQueryIds()); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsDesc.java similarity index 68% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsDesc.java index 093800f6bb..14fbd5c8c4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsDesc.java @@ -15,42 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.process; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; /** - * Descriptor for showing compactions. + * DDL task description for SHOW COMPACTIONS commands. */ -public class ShowCompactionsDesc extends DDLDesc implements Serializable { - +@Explain(displayName = "Show Compactions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowCompactionsDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private static final String schema = + + static { + DDLTask2.registerOperation(ShowCompactionsDesc.class, ShowCompactionsOperation.class); + } + + public static final String SCHEMA = "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid#" + "string:string:string:string:string:string:string:string:string:string:string"; private String resFile; - /** - * - * @param resFile File that results of show will be written to. - */ public ShowCompactionsDesc(Path resFile) { this.resFile = resFile.toString(); } - /** - * No arg constructor for serialization.
- */ - public ShowCompactionsDesc() { - } - - public String getSchema() { - return schema; - } - public String getResFile() { return resFile; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java new file mode 100644 index 0000000000..e148aa0fe0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.process; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing compactions. 
+ */ +public class ShowCompactionsOperation extends DDLOperation { + private final ShowCompactionsDesc desc; + + public ShowCompactionsOperation(DDLOperationContext context, ShowCompactionsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // Call the metastore to get the status of all known compactions (completed ones are eventually purged) + ShowCompactResponse rsp = context.getDb().showCompactions(); + + // Write the results into the file + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + // Write a header + writeHeader(os); + + if (rsp.getCompacts() != null) { + for (ShowCompactResponseElement e : rsp.getCompacts()) { + writeRow(os, e); + } + } + } catch (IOException e) { + LOG.warn("show compactions: ", e); + return 1; + } + return 0; + } + + private void writeHeader(DataOutputStream os) throws IOException { + os.writeBytes("CompactionId"); + os.write(Utilities.tabCode); + os.writeBytes("Database"); + os.write(Utilities.tabCode); + os.writeBytes("Table"); + os.write(Utilities.tabCode); + os.writeBytes("Partition"); + os.write(Utilities.tabCode); + os.writeBytes("Type"); + os.write(Utilities.tabCode); + os.writeBytes("State"); + os.write(Utilities.tabCode); + os.writeBytes("Hostname"); + os.write(Utilities.tabCode); + os.writeBytes("Worker"); + os.write(Utilities.tabCode); + os.writeBytes("Start Time"); + os.write(Utilities.tabCode); + os.writeBytes("Duration(ms)"); + os.write(Utilities.tabCode); + os.writeBytes("HadoopJobId"); + os.write(Utilities.newLineCode); + } + + private static final String NO_VAL = " --- "; + + private void writeRow(DataOutputStream os, ShowCompactResponseElement e) throws IOException { + os.writeBytes(Long.toString(e.getId())); + os.write(Utilities.tabCode); + os.writeBytes(e.getDbname()); + os.write(Utilities.tabCode); + os.writeBytes(e.getTablename()); + os.write(Utilities.tabCode); + String part = e.getPartitionname(); + os.writeBytes(part == null ? NO_VAL : part); + os.write(Utilities.tabCode); + os.writeBytes(e.getType().toString()); + os.write(Utilities.tabCode); + os.writeBytes(e.getState()); + os.write(Utilities.tabCode); + String wid = e.getWorkerid(); + os.writeBytes(wid == null ? NO_VAL : wid.split("-")[0]); + os.write(Utilities.tabCode); + os.writeBytes(wid == null ? NO_VAL : wid.split("-")[1]); + os.write(Utilities.tabCode); + os.writeBytes(e.isSetStart() ? Long.toString(e.getStart()) : NO_VAL); + os.write(Utilities.tabCode); + os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : NO_VAL); + os.write(Utilities.tabCode); + os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : NO_VAL); + os.write(Utilities.newLineCode); + } +}
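To make the SHOW COMPACTIONS output format concrete: writeHeader and writeRow above emit tab-separated lines, NO_VAL (" --- ") stands in for unset fields, and the worker id is split on "-" into its hostname and worker halves. A small illustrative emitter, with every value invented for the example:

    // Illustrative only: what one ShowCompactionsOperation output row
    // looks like, using made-up values.
    public class ShowCompactionsRowSample {
      public static void main(String[] args) {
        String[] header = { "CompactionId", "Database", "Table", "Partition", "Type",
            "State", "Hostname", "Worker", "Start Time", "Duration(ms)", "HadoopJobId" };
        String[] row = { "42", "default", "web_logs", "ds=2019-06-01", "MAJOR",
            "succeeded", "node7", "35", "1559376000000", "84000", " --- " };
        System.out.println(String.join("\t", header));
        System.out.println(String.join("\t", row));
      }
    }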
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsDesc.java new file mode 100644 index 0000000000..f9c9374d04 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsDesc.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.process; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +import java.io.Serializable; + +/** + * DDL task description for SHOW TRANSACTIONS commands. + */ +@Explain(displayName = "Show Transactions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowTransactionsDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(ShowTransactionsDesc.class, ShowTransactionsOperation.class); + } + + public static final String SCHEMA = + "txnid,state,startedtime,lastheartbeattime,user,host#string:string:string:string:string:string"; + + private final String resFile; + + public ShowTransactionsDesc(Path resFile) { + this.resFile = resFile.toString(); + } + + public String getResFile() { + return resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java new file mode 100644 index 0000000000..59a795347d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowTransactionsOperation.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.process; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.TxnInfo; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing transactions.
+ */ +public class ShowTransactionsOperation extends DDLOperation { + private final ShowTransactionsDesc desc; + + public ShowTransactionsOperation(DDLOperationContext context, ShowTransactionsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // Call the metastore to get the currently open transactions. + GetOpenTxnsInfoResponse rsp = context.getDb().showTransactions(); + + // Write the results into the file + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + writeHeader(os); + + for (TxnInfo txn : rsp.getOpen_txns()) { + writeRow(os, txn); + } + } catch (IOException e) { + LOG.warn("show transactions: ", e); + return 1; + } + + return 0; + } + + private void writeHeader(DataOutputStream os) throws IOException { + os.writeBytes("Transaction ID"); + os.write(Utilities.tabCode); + os.writeBytes("Transaction State"); + os.write(Utilities.tabCode); + os.writeBytes("Started Time"); + os.write(Utilities.tabCode); + os.writeBytes("Last Heartbeat Time"); + os.write(Utilities.tabCode); + os.writeBytes("User"); + os.write(Utilities.tabCode); + os.writeBytes("Hostname"); + os.write(Utilities.newLineCode); + } + + private void writeRow(DataOutputStream os, TxnInfo txn) throws IOException { + os.writeBytes(Long.toString(txn.getId())); + os.write(Utilities.tabCode); + os.writeBytes(txn.getState().toString()); + os.write(Utilities.tabCode); + os.writeBytes(Long.toString(txn.getStartedTime())); + os.write(Utilities.tabCode); + os.writeBytes(Long.toString(txn.getLastHeartbeatTime())); + os.write(Utilities.tabCode); + os.writeBytes(txn.getUser()); + os.write(Utilities.tabCode); + os.writeBytes(txn.getHostname()); + os.write(Utilities.newLineCode); + } +}
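A side note on the SCHEMA constants both new descriptors expose: the substring before "#" is the comma-separated column-name list and the substring after it is the colon-separated type list, which the result-fetching layer presumably pairs up positionally (the patch itself does not show that consumer). A tiny sketch of the split:

    // Illustrative parsing of a descriptor SCHEMA string.
    public class SchemaStringSample {
      public static void main(String[] args) {
        String schema = "txnid,state,startedtime,lastheartbeattime,user,host#"
            + "string:string:string:string:string:string";
        String[] halves = schema.split("#");
        String[] names = halves[0].split(",");
        String[] types = halves[1].split(":");
        for (int i = 0; i < names.length; i++) {
          System.out.println(names[i] + " : " + types[i]);
        }
      }
    }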
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java new file mode 100644 index 0000000000..07fbccc2df --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Process related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.process; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java index ee32f4c9b4..97217253da 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import java.io.Serializable; import java.util.ArrayList; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeDesc.java index 6652b79a88..b6c58ba90b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeOperation.java index 7cec1e38af..d6198d9a4b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableLikeOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java index 2987cab9b0..d5054618c8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.fs.Path; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java index f910c57f3a..44b1593f78 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import java.io.Serializable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableOperation.java index d250772b2a..a56fabd8a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/DropTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableDesc.java index a06f1fae5f..bba67a4890 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import java.io.Serializable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java index 932d9428a9..3ea7443787 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.creation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/package-info.java new file mode 100644 index 0000000000..6644b4bd0b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Table creation related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table.creation; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java index bb533c2c85..cdd1777767 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java index 1d94ff3a5b..93deb0f927 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import java.io.DataOutputStream; import java.sql.SQLException; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java index 7ba1c2daef..d9b618ee48 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import java.io.Serializable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java index 385052d705..f145ca53b9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java index 6707350f34..daca469af6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java index ea695fd1a3..4a1947b28c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTableStatusOperation.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java index 9ec390483d..79b2d955f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import java.io.Serializable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java index 71b5717bb8..0cbcb0d746 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablesOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.info; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java new file mode 100644 index 0000000000..2b21cd95b8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Table info related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table.info; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableDesc.java index 2a8b02e067..9e545e1a7f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.lock; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableOperation.java similarity index 96% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableOperation.java index 2044a81406..8c722d4e5e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/LockTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.lock; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.Context; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java new file mode 100644 index 0000000000..075492f02e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksDesc.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.lock; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW LOCKS commands. 
+ */ +@Explain(displayName = "Show Locks", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowLocksDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private static final String OLD_FORMAT_SCHEMA = "tab_name,mode#string:string"; + private static final String NEW_FORMAT_SCHEMA = "lockid,database,table,partition,lock_state," + "blocked_by,lock_type,transaction_id,last_heartbeat,acquired_at,user,hostname,agent_info#" + "string:string:string:string:string:string:string:string:string:string:string:string:string"; + + static { + DDLTask2.registerOperation(ShowLocksDesc.class, ShowLocksOperation.class); + } + + private final String resFile; + private final String dbName; + private final String tableName; + private final Map<String, String> partSpec; + private final boolean isExt; + private final boolean isNewFormat; + + public ShowLocksDesc(Path resFile, String dbName, boolean isExt, boolean isNewFormat) { + this.resFile = resFile.toString(); + this.dbName = dbName; + this.tableName = null; + this.partSpec = null; + this.isExt = isExt; + this.isNewFormat = isNewFormat; + } + + public ShowLocksDesc(Path resFile, String tableName, Map<String, String> partSpec, boolean isExt, + boolean isNewFormat) { + this.resFile = resFile.toString(); + this.dbName = null; + this.tableName = tableName; + this.partSpec = partSpec; + this.isExt = isExt; + this.isNewFormat = isNewFormat; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "dbName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map<String, String> getPartSpec() { + return partSpec; + } + + public boolean isExt() { + return isExt; + } + + public boolean isNewFormat() { + return isNewFormat; + } + + public String getSchema() { + return isNewFormat ? NEW_FORMAT_SCHEMA : OLD_FORMAT_SCHEMA; + } +}
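The two constructors above map to the two SHOW LOCKS variants: the database-level form sets dbName and leaves the table fields null, while the table-level form does the reverse, and getSchema() picks the wide transactional schema only for the new (DbLockManager) format. A hedged sketch of that mapping, with a made-up table name and result path:

    // Illustrative mapping of SHOW LOCKS statements to the two constructors.
    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;

    class ShowLocksDescUsage {
      static void sketch() {
        Path resFile = new Path("/tmp/show_locks.res"); // invented scratch path
        // SHOW LOCKS DATABASE db1           -> new ShowLocksDesc(resFile, "db1", false, true)
        // SHOW LOCKS t1 PARTITION (ds='1')  -> table-level constructor:
        Map<String, String> partSpec = Collections.singletonMap("ds", "1");
        ShowLocksDesc desc = new ShowLocksDesc(resFile, "t1", partSpec, false, true);
        System.out.println(desc.getSchema()); // the new-format schema
      }
    }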
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java new file mode 100644 index 0000000000..de0c141bf0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/ShowLocksOperation.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.lock; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.lockmgr.DbLockManager; +import org.apache.hadoop.hive.ql.lockmgr.HiveLock; +import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; +import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode; +import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of showing the locks. + */ +public class ShowLocksOperation extends DDLOperation { + private final ShowLocksDesc desc; + + public ShowLocksOperation(DDLOperationContext context, ShowLocksDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + HiveLockManager lockMgr = txnManager.getLockManager(); + + if (desc.isNewFormat()) { + return showLocksNewFormat(lockMgr); + } else { + return showLocksOldFormat(lockMgr); + } + } + + private int showLocksOldFormat(HiveLockManager lockMgr) throws HiveException { + if (lockMgr == null) { + throw new HiveException("show Locks LockManager not specified"); + } + + // write the results in the file + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + List<HiveLock> locks = getLocksForOldFormat(lockMgr); + writeLocksInOldFormat(os, locks); + } catch (IOException e) { + LOG.warn("show locks: ", e); + return 1; + } catch (Exception e) { + throw new HiveException(e.toString(), e); + } + + return 0; + } + + private List<HiveLock> getLocksForOldFormat(HiveLockManager lockMgr) throws LockException, HiveException { + List<HiveLock> locks = null; + if (desc.getTableName() == null) { + // TODO should be doing security check here. Users should not be able to see each other's locks. + locks = lockMgr.getLocks(false, desc.isExt()); + } else { + HiveLockObject lockObject = HiveLockObject.createFrom(context.getDb(), desc.getTableName(), desc.getPartSpec()); + locks = lockMgr.getLocks(lockObject, true, desc.isExt()); + } + Collections.sort(locks, new Comparator<HiveLock>() { + @Override + public int compare(HiveLock o1, HiveLock o2) { + int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName()); + if (cmp != 0) { + return cmp; + } + + if (o1.getHiveLockMode() == o2.getHiveLockMode()) { + return 0; + } + + // EXCLUSIVE locks occur before SHARED locks + return (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) ? -1 : +1; + } + }); + return locks; + }
+ + private void writeLocksInOldFormat(DataOutputStream os, List<HiveLock> locks) throws IOException { + for (HiveLock lock : locks) { + os.writeBytes(lock.getHiveLockObject().getDisplayName()); + os.write(Utilities.tabCode); + os.writeBytes(lock.getHiveLockMode().toString()); + + if (desc.isExt()) { + HiveLockObjectData lockData = lock.getHiveLockObject().getData(); + if (lockData != null) { + os.write(Utilities.newLineCode); + os.writeBytes("LOCK_QUERYID:" + lockData.getQueryId()); + os.write(Utilities.newLineCode); + os.writeBytes("LOCK_TIME:" + lockData.getLockTime()); + os.write(Utilities.newLineCode); + os.writeBytes("LOCK_MODE:" + lockData.getLockMode()); + os.write(Utilities.newLineCode); + os.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr()); + } + } + + os.write(Utilities.newLineCode); + } + } + + private int showLocksNewFormat(HiveLockManager lockMgr) throws HiveException { + ShowLocksResponse response = getLocksForNewFormat(lockMgr); + + // write the results in the file + try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + dumpLockInfo(os, response); + } catch (IOException e) { + LOG.warn("show locks: ", e); + return 1; + } catch (Exception e) { + throw new HiveException(e.toString()); + } + + return 0; + } + + private ShowLocksResponse getLocksForNewFormat(HiveLockManager lockMgr) throws HiveException, LockException { + if (!(lockMgr instanceof DbLockManager)) { + throw new HiveException("New lock format only supported with db lock manager."); + } + + ShowLocksRequest request = new ShowLocksRequest(); + if (desc.getDbName() == null && desc.getTableName() != null) { + request.setDbname(SessionState.get().getCurrentDatabase()); + } else { + request.setDbname(desc.getDbName()); + } + request.setTablename(desc.getTableName()); + if (desc.getPartSpec() != null) { + List<String> keyList = new ArrayList<String>(); + List<String> valList = new ArrayList<String>(); + for (String partKey : desc.getPartSpec().keySet()) { + String partVal = desc.getPartSpec().get(partKey); + keyList.add(partKey); + valList.add(partVal); + } + String partName = FileUtils.makePartName(keyList, valList); + request.setPartname(partName); + } + + return ((DbLockManager)lockMgr).getLocks(request); + } + + public static void dumpLockInfo(DataOutputStream os, ShowLocksResponse response) throws IOException { + // Write a header + os.writeBytes("Lock ID"); + os.write(Utilities.tabCode); + os.writeBytes("Database"); + os.write(Utilities.tabCode); + os.writeBytes("Table"); + os.write(Utilities.tabCode); + os.writeBytes("Partition"); + os.write(Utilities.tabCode); + os.writeBytes("State"); + os.write(Utilities.tabCode); + os.writeBytes("Blocked By"); + os.write(Utilities.tabCode); + os.writeBytes("Type"); + os.write(Utilities.tabCode); + os.writeBytes("Transaction ID"); + os.write(Utilities.tabCode); + os.writeBytes("Last Heartbeat"); + os.write(Utilities.tabCode); + os.writeBytes("Acquired At"); + os.write(Utilities.tabCode); + os.writeBytes("User"); + os.write(Utilities.tabCode); + os.writeBytes("Hostname"); + os.write(Utilities.tabCode); + os.writeBytes("Agent Info"); + os.write(Utilities.newLineCode); + + List<ShowLocksResponseElement> locks = response.getLocks(); + if (locks != null) { + for (ShowLocksResponseElement lock : locks) { + if (lock.isSetLockIdInternal()) { + os.writeBytes(Long.toString(lock.getLockid()) + "." + Long.toString(lock.getLockIdInternal())); + } else { + os.writeBytes(Long.toString(lock.getLockid())); + }
+ os.write(Utilities.tabCode); + os.writeBytes(lock.getDbname()); + os.write(Utilities.tabCode); + os.writeBytes((lock.getTablename() == null) ? "NULL" : lock.getTablename()); + os.write(Utilities.tabCode); + os.writeBytes((lock.getPartname() == null) ? "NULL" : lock.getPartname()); + os.write(Utilities.tabCode); + os.writeBytes(lock.getState().toString()); + os.write(Utilities.tabCode); + if (lock.isSetBlockedByExtId()) { // both "blockedby" ids are either set or not + os.writeBytes(Long.toString(lock.getBlockedByExtId()) + "." + Long.toString(lock.getBlockedByIntId())); + } else { + os.writeBytes("            "); // 12 chars - try to keep cols aligned + } + os.write(Utilities.tabCode); + os.writeBytes(lock.getType().toString()); + os.write(Utilities.tabCode); + os.writeBytes((lock.getTxnid() == 0) ? "NULL" : Long.toString(lock.getTxnid())); + os.write(Utilities.tabCode); + os.writeBytes(Long.toString(lock.getLastheartbeat())); + os.write(Utilities.tabCode); + os.writeBytes((lock.getAcquiredat() == 0) ? "NULL" : Long.toString(lock.getAcquiredat())); + os.write(Utilities.tabCode); + os.writeBytes(lock.getUser()); + os.write(Utilities.tabCode); + os.writeBytes(lock.getHostname()); + os.write(Utilities.tabCode); + os.writeBytes(lock.getAgentInfo() == null ? "NULL" : lock.getAgentInfo()); + os.write(Utilities.newLineCode); + } + } + } +}
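One detail in getLocksForNewFormat above deserves a note: the partition spec is flattened into parallel key and value lists and handed to FileUtils.makePartName, which for a spec like {ds=2019-06-01, hr=10} should produce the metastore-style name ds=2019-06-01/hr=10 (exact escaping is makePartName's concern). A self-contained illustration of just the flattening step:

    // Illustrative: flattening a partition spec the way getLocksForNewFormat does.
    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class PartNameFlattenSample {
      public static void main(String[] args) {
        Map<String, String> partSpec = new LinkedHashMap<>();
        partSpec.put("ds", "2019-06-01");
        partSpec.put("hr", "10");
        List<String> keyList = new ArrayList<>();
        List<String> valList = new ArrayList<>();
        for (Map.Entry<String, String> e : partSpec.entrySet()) {
          keyList.add(e.getKey());
          valList.add(e.getValue());
        }
        // FileUtils.makePartName(keyList, valList) would yield "ds=2019-06-01/hr=10".
        System.out.println(keyList + " -> " + valList);
      }
    }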
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableDesc.java index 86050244eb..fcda7fa70a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.lock; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableOperation.java similarity index 96% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableOperation.java index 8b70e06ca6..d0691fdbdc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/UnlockTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.lock; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.Context; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AbortTxnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java similarity index 62% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AbortTxnsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java index e15d21549f..2daf50d4c9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AbortTxnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/lock/package-info.java @@ -15,22 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; -import java.io.Serializable; -import java.util.List; -/** - * Descriptor for aborting transactions. - */ -public class AbortTxnsDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - private final List<Long> txnids; - /** - * No arg constructor for serialization. - */ - public AbortTxnsDesc(List<Long> txnids) { - this.txnids = txnids; - } - public List<Long> getTxnids() { - return txnids; - } -} \ No newline at end of file + +/** Table locking related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table.lock; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableDesc.java index 4bb609ef54..4b7c606956 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.misc; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.ddl.DDLTask2; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableOperation.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableOperation.java index 5d85d0a14c..d1136a5204 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/PreInsertTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.misc; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java index 1f0cd82690..6980667ff6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.misc; import java.io.Serializable; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java index 9778bfac92..db8c3c7209 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.table.misc; import java.io.Serializable; import java.util.ArrayList; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java similarity index 50% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java index 8696aaa74c..70967b4ec2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/package-info.java @@ -15,41 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; -import org.apache.hadoop.fs.Path; - -import java.io.Serializable; - -/** - * Descriptor for showing transactions. - */ -public class ShowTxnsDesc extends DDLDesc implements Serializable { - - private static final long serialVersionUID = 1L; - private static final String schema = "txnid,state,startedtime,lastheartbeattime,user,host#string:string:string:string:string:string"; - - private String resFile; - - /** - * - * @param resFile File that results of show will be written to. - */ - public ShowTxnsDesc(Path resFile) { - this.resFile = resFile.toString(); - } - - /** - * No arg constructor for serialization. - */ - public ShowTxnsDesc() { - } - - public String getSchema() { - return schema; - } - - public String getResFile() { - return resFile; - } -} +/** Miscellaneous table related DDL operation descriptions and operations. 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java similarity index 93% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java index f0afccacb5..dc379d7aac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewDesc.java @@ -16,12 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.alter; +package org.apache.hadoop.hive.ql.ddl.view; import java.io.Serializable; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.alter.AlterMaterializedViewTypes; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java similarity index 87% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java index 72cc84c5cd..aefbe8df08 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteDesc.java @@ -16,9 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.alter; +package org.apache.hadoop.hive.ql.ddl.view; import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.alter.AlterMaterializedViewTypes; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteOperation.java similarity index 96% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteOperation.java index 379f472452..acd09d9d5e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/AlterMaterializedViewRewriteOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterMaterializedViewRewriteOperation.java @@ -16,7 +16,7 @@ * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.alter; +package org.apache.hadoop.hive.ql.ddl.view; import org.apache.calcite.rel.RelNode; import org.apache.hadoop.hive.common.StatsSetupConst; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java index ebc7e00b7d..595da9c5ea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.view; import java.io.Serializable; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewOperation.java index f8d95aa747..b32413eab4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/CreateViewOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table; +package org.apache.hadoop.hive.ql.ddl.view; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java similarity index 88% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/package-info.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java index b113f39db8..5908b420b2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/alter/package-info.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/package-info.java @@ -16,5 +16,5 @@ * limitations under the License. */ -/** Alter DDL operation descriptions and operations. */ -package org.apache.hadoop.hive.ql.ddl.alter; +/** View related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.view;
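The hunks that follow remove DDLTask's hand-written dispatch for the commands migrated above. What replaces it is the registration scheme used throughout this patch: each migrated description registers its operation class with DDLTask2, and the generic task resolves the operation and runs it. DDLTask2 itself is not part of these hunks, so the sketch below only illustrates the kind of registry such a scheme implies; the class name OperationRegistry and its members are assumptions for illustration, not the actual DDLTask2 source:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.hadoop.hive.ql.ddl.DDLDesc;
    import org.apache.hadoop.hive.ql.ddl.DDLOperation;
    import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    /** Sketch only: a desc-to-operation registry of the kind this patch implies. */
    public final class OperationRegistry {
      private static final Map<Class<? extends DDLDesc>, Class<? extends DDLOperation>> OPERATIONS =
          new ConcurrentHashMap<>();

      private OperationRegistry() {
      }

      public static void registerOperation(Class<? extends DDLDesc> descClass,
          Class<? extends DDLOperation> operationClass) {
        OPERATIONS.put(descClass, operationClass);
      }

      /** Instantiates the operation registered for the given desc's class. */
      public static DDLOperation operationFor(DDLOperationContext context, DDLDesc desc)
          throws HiveException {
        try {
          // The operations in this patch all expose a (DDLOperationContext, <desc>) constructor.
          return OPERATIONS.get(desc.getClass())
              .getConstructor(DDLOperationContext.class, desc.getClass())
              .newInstance(context, desc);
        } catch (ReflectiveOperationException e) {
          throw new HiveException(e);
        }
      }
    }

With a registry like this, executing any migrated command reduces to a single operationFor(context, desc).execute() call, which is why every work.getXxxDesc() branch removed below becomes dead code.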
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 7c9d910c20..3d4ba0110a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -19,13 +19,11 @@ package org.apache.hadoop.hive.ql.exec; import java.io.DataOutputStream; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.LinkedHashMap; @@ -59,7 +57,6 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -67,16 +64,11 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; -import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryPlan; @@ -92,13 +84,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcSerde; import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; -import org.apache.hadoop.hive.ql.lockmgr.DbLockManager; -import org.apache.hadoop.hive.ql.lockmgr.HiveLock; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; -import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; @@ -114,7 +99,6 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; @@ -126,7 +110,6 @@ import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; -import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import
org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; @@ -137,11 +120,8 @@ import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.ReplRemoveFirstIncLoadPendFlagDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; -import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; @@ -279,26 +259,6 @@ public int execute(DriverContext driverContext) { return showColumns(db, showCols); } - ShowLocksDesc showLocks = work.getShowLocksDesc(); - if (showLocks != null) { - return showLocks(db, showLocks); - } - - ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc(); - if (compactionsDesc != null) { - return showCompactions(db, compactionsDesc); - } - - ShowTxnsDesc txnsDesc = work.getShowTxnsDesc(); - if (txnsDesc != null) { - return showTxns(db, txnsDesc); - } - - AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc(); - if (abortTxnsDesc != null) { - return abortTxns(db, abortTxnsDesc); - } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { return showPartitions(db, showParts); @@ -334,11 +294,6 @@ public int execute(DriverContext driverContext) { return insertCommitWork(db, insertCommitHookDesc); } - KillQueryDesc killQueryDesc = work.getKillQueryDesc(); - if (killQueryDesc != null) { - return killQuery(db, killQueryDesc); - } - if (work.getReplSetFirstIncLoadFlagDesc() != null) { return remFirstIncPendFlag(db, work.getReplSetFirstIncLoadFlagDesc()); } @@ -1505,352 +1460,6 @@ public int compare(FieldSchema f1, FieldSchema f2) { return new ArrayList<FieldSchema>(sortedCol); } - /** - * Write a list of the current locks to a file. - * @param db - * - * @param showLocks - * the locks we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - HiveLockManager lockMgr = txnManager.getLockManager(); - - if (txnManager.useNewShowLocksFormat()) { - return showLocksNewFormat(showLocks, lockMgr); - } - - boolean isExt = showLocks.isExt(); - if (lockMgr == null) { - throw new HiveException("show Locks LockManager not specified"); - } - - // write the results in the file - DataOutputStream outStream = getOutputStream(showLocks.getResFile()); - try { - List<HiveLock> locks = null; - - if (showLocks.getTableName() == null) { - // TODO should be doing security check here. Users should not be - // able to see each other's locks.
- locks = lockMgr.getLocks(false, isExt); - } - else { - locks = lockMgr.getLocks(HiveLockObject.createFrom(db, - showLocks.getTableName(), showLocks.getPartSpec()), - true, isExt); - } - - Collections.sort(locks, new Comparator<HiveLock>() { - - @Override - public int compare(HiveLock o1, HiveLock o2) { - int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName()); - if (cmp == 0) { - if (o1.getHiveLockMode() == o2.getHiveLockMode()) { - return cmp; - } - // EXCLUSIVE locks occur before SHARED locks - if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) { - return -1; - } - return +1; - } - return cmp; - } - - }); - - Iterator<HiveLock> locksIter = locks.iterator(); - - while (locksIter.hasNext()) { - HiveLock lock = locksIter.next(); - outStream.writeBytes(lock.getHiveLockObject().getDisplayName()); - outStream.write(separator); - outStream.writeBytes(lock.getHiveLockMode().toString()); - if (isExt) { - HiveLockObjectData lockData = lock.getHiveLockObject().getData(); - if (lockData != null) { - outStream.write(terminator); - outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId()); - outStream.write(terminator); - outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime()); - outStream.write(terminator); - outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode()); - outStream.write(terminator); - outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr()); - } - } - outStream.write(terminator); - } - } catch (FileNotFoundException e) { - LOG.warn("show function: ", e); - return 1; - } catch (IOException e) { - LOG.warn("show function: ", e); - return 1; - } catch (Exception e) { - throw new HiveException(e.toString(), e); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - public static void dumpLockInfo(DataOutputStream os, ShowLocksResponse rsp) throws IOException { - // Write a header - os.writeBytes("Lock ID"); - os.write(separator); - os.writeBytes("Database"); - os.write(separator); - os.writeBytes("Table"); - os.write(separator); - os.writeBytes("Partition"); - os.write(separator); - os.writeBytes("State"); - os.write(separator); - os.writeBytes("Blocked By"); - os.write(separator); - os.writeBytes("Type"); - os.write(separator); - os.writeBytes("Transaction ID"); - os.write(separator); - os.writeBytes("Last Heartbeat"); - os.write(separator); - os.writeBytes("Acquired At"); - os.write(separator); - os.writeBytes("User"); - os.write(separator); - os.writeBytes("Hostname"); - os.write(separator); - os.writeBytes("Agent Info"); - os.write(terminator); - - List<ShowLocksResponseElement> locks = rsp.getLocks(); - if (locks != null) { - for (ShowLocksResponseElement lock : locks) { - if(lock.isSetLockIdInternal()) { - os.writeBytes(Long.toString(lock.getLockid()) + "." + Long.toString(lock.getLockIdInternal())); - } - else { - os.writeBytes(Long.toString(lock.getLockid())); - } - os.write(separator); - os.writeBytes(lock.getDbname()); - os.write(separator); - os.writeBytes((lock.getTablename() == null) ? "NULL" : lock.getTablename()); - os.write(separator); - os.writeBytes((lock.getPartname() == null) ? "NULL" : lock.getPartname()); - os.write(separator); - os.writeBytes(lock.getState().toString()); - os.write(separator); - if(lock.isSetBlockedByExtId()) {//both "blockedby" are either there or not - os.writeBytes(Long.toString(lock.getBlockedByExtId()) + "."
+ Long.toString(lock.getBlockedByIntId())); - } - else { - os.writeBytes("            ");//12 chars - try to keep cols aligned - } - os.write(separator); - os.writeBytes(lock.getType().toString()); - os.write(separator); - os.writeBytes((lock.getTxnid() == 0) ? "NULL" : Long.toString(lock.getTxnid())); - os.write(separator); - os.writeBytes(Long.toString(lock.getLastheartbeat())); - os.write(separator); - os.writeBytes((lock.getAcquiredat() == 0) ? "NULL" : Long.toString(lock.getAcquiredat())); - os.write(separator); - os.writeBytes(lock.getUser()); - os.write(separator); - os.writeBytes(lock.getHostname()); - os.write(separator); - os.writeBytes(lock.getAgentInfo() == null ? "NULL" : lock.getAgentInfo()); - os.write(separator); - os.write(terminator); - } - } - } - private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm) - throws HiveException { - - DbLockManager lockMgr; - if (!(lm instanceof DbLockManager)) { - throw new RuntimeException("New lock format only supported with db lock manager."); - } - lockMgr = (DbLockManager)lm; - - String dbName = showLocks.getDbName(); - String tblName = showLocks.getTableName(); - Map<String, String> partSpec = showLocks.getPartSpec(); - if (dbName == null && tblName != null) { - dbName = SessionState.get().getCurrentDatabase(); - } - - ShowLocksRequest rqst = new ShowLocksRequest(); - rqst.setDbname(dbName); - rqst.setTablename(tblName); - if (partSpec != null) { - List<String> keyList = new ArrayList<String>(); - List<String> valList = new ArrayList<String>(); - for (String partKey : partSpec.keySet()) { - String partVal = partSpec.remove(partKey); - keyList.add(partKey); - valList.add(partVal); - } - String partName = FileUtils.makePartName(keyList, valList); - rqst.setPartname(partName); - } - - ShowLocksResponse rsp = lockMgr.getLocks(rqst); - - // write the results in the file - DataOutputStream os = getOutputStream(showLocks.getResFile()); - try { - dumpLockInfo(os, rsp); - } catch (FileNotFoundException e) { - LOG.warn("show function: ", e); - return 1; - } catch (IOException e) { - LOG.warn("show function: ", e); - return 1; - } catch (Exception e) { - throw new HiveException(e.toString()); - } finally { - IOUtils.closeStream(os); - } - return 0; - } - - private int showCompactions(Hive db, ShowCompactionsDesc desc) throws HiveException { - // Call the metastore to get the status of all known compactions (completed get purged eventually) - ShowCompactResponse rsp = db.showCompactions(); - - // Write the results into the file - final String noVal = " --- "; - - DataOutputStream os = getOutputStream(desc.getResFile()); - try { - // Write a header - os.writeBytes("CompactionId"); - os.write(separator); - os.writeBytes("Database"); - os.write(separator); - os.writeBytes("Table"); - os.write(separator); - os.writeBytes("Partition"); - os.write(separator); - os.writeBytes("Type"); - os.write(separator); - os.writeBytes("State"); - os.write(separator); - os.writeBytes("Hostname"); - os.write(separator); - os.writeBytes("Worker"); - os.write(separator); - os.writeBytes("Start Time"); - os.write(separator); - os.writeBytes("Duration(ms)"); - os.write(separator); - os.writeBytes("HadoopJobId"); - os.write(terminator); - - if (rsp.getCompacts() != null) { - for (ShowCompactResponseElement e : rsp.getCompacts()) { - os.writeBytes(Long.toString(e.getId())); - os.write(separator); - os.writeBytes(e.getDbname()); - os.write(separator); - os.writeBytes(e.getTablename()); - os.write(separator); - String part = e.getPartitionname(); - os.writeBytes(part == null ?
noVal : part); - os.write(separator); - os.writeBytes(e.getType().toString()); - os.write(separator); - os.writeBytes(e.getState()); - os.write(separator); - String wid = e.getWorkerid(); - os.writeBytes(wid == null ? noVal : wid.split("-")[0]); - os.write(separator); - os.writeBytes(wid == null ? noVal : wid.split("-")[1]); - os.write(separator); - os.writeBytes(e.isSetStart() ? Long.toString(e.getStart()) : noVal); - os.write(separator); - os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : noVal); - os.write(separator); - os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : noVal); - os.write(terminator); - } - } - } catch (IOException e) { - LOG.warn("show compactions: ", e); - return 1; - } finally { - IOUtils.closeStream(os); - } - return 0; - } - - private int showTxns(Hive db, ShowTxnsDesc desc) throws HiveException { - // Call the metastore to get the currently queued and running compactions. - GetOpenTxnsInfoResponse rsp = db.showTransactions(); - - // Write the results into the file - DataOutputStream os = getOutputStream(desc.getResFile()); - try { - // Write a header - os.writeBytes("Transaction ID"); - os.write(separator); - os.writeBytes("Transaction State"); - os.write(separator); - os.writeBytes("Started Time"); - os.write(separator); - os.writeBytes("Last Heartbeat Time"); - os.write(separator); - os.writeBytes("User"); - os.write(separator); - os.writeBytes("Hostname"); - os.write(terminator); - - for (TxnInfo txn : rsp.getOpen_txns()) { - os.writeBytes(Long.toString(txn.getId())); - os.write(separator); - os.writeBytes(txn.getState().toString()); - os.write(separator); - os.writeBytes(Long.toString(txn.getStartedTime())); - os.write(separator); - os.writeBytes(Long.toString(txn.getLastHeartbeatTime())); - os.write(separator); - os.writeBytes(txn.getUser()); - os.write(separator); - os.writeBytes(txn.getHostname()); - os.write(terminator); - } - } catch (IOException e) { - LOG.warn("show transactions: ", e); - return 1; - } finally { - IOUtils.closeStream(os); - } - return 0; - } - - private int abortTxns(Hive db, AbortTxnsDesc desc) throws HiveException { - db.abortTransactions(desc.getTxnids()); - return 0; - } - - private int killQuery(Hive db, KillQueryDesc desc) throws HiveException { - SessionState sessionState = SessionState.get(); - for (String queryId : desc.getQueryIds()) { - sessionState.getKillQuery().killQuery(queryId, "User invoked KILL QUERY", db.getConf()); - } - LOG.info("kill query called ({})", desc.getQueryIds()); - return 0; - } - /** * Alter a given table. 
* diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index b335f199f8..43a4067757 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 24fc0d5437..6f4931c700 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.Entity; import org.apache.hadoop.hive.ql.hooks.ReadEntity; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java index 1a042783b0..5ced5c5a75 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java @@ -19,7 +19,6 @@ import com.google.common.collect.ImmutableList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.exec.DDLTask; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.JavaUtils; @@ -28,6 +27,7 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; +import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksOperation; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.thrift.TException; @@ -186,7 +186,7 @@ private void showLocksNewFormat(String preamble) throws LockException { ByteArrayOutputStream baos = new ByteArrayOutputStream(1024*2); DataOutputStream os = new DataOutputStream(baos); try { - DDLTask.dumpLockInfo(os, rsp); + ShowLocksOperation.dumpLockInfo(os, rsp); os.flush(); LOG.info(baos.toString()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 800d80a067..d412dd72d1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -41,8 +41,8 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; -import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import 
org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index bb46bf9490..744759ede3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; -import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java index 0e148ed396..43d794fc46 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; -import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.ErrorMsg; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index c9dd8541c2..5bc47e2eb9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; import org.apache.hadoop.hive.ql.metadata.CheckConstraint; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java index 960dd34a8c..76415cf7e2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import 
org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; -import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; import org.apache.hadoop.hive.ql.exec.StatsTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index bd6ae715d6..3bf2a43b01 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -71,7 +71,6 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.alter.AlterMaterializedViewRewriteDesc; import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc; @@ -88,15 +87,21 @@ import org.apache.hadoop.hive.ql.ddl.privilege.ShowPrincipalsDesc; import org.apache.hadoop.hive.ql.ddl.privilege.ShowRoleGrantDesc; import org.apache.hadoop.hive.ql.ddl.privilege.ShowRolesDesc; -import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc; -import org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc; -import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.process.AbortTransactionsDesc; +import org.apache.hadoop.hive.ql.ddl.process.KillQueriesDesc; +import org.apache.hadoop.hive.ql.ddl.process.ShowCompactionsDesc; +import org.apache.hadoop.hive.ql.ddl.process.ShowTransactionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.ShowCreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablePropertiesDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksDesc; +import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.AlterMaterializedViewRewriteDesc; import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolAddTriggerDesc; import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolDropTriggerDesc; import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterResourcePlanDesc; @@ -140,7 +145,6 @@ import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl; -import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import 
org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; @@ -160,7 +164,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; @@ -168,11 +171,8 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; -import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.session.SessionState; @@ -2851,8 +2851,7 @@ private void analyzeShowLocks(ASTNode ast) throws SemanticException { ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, partSpec, isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showLocksDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showLocksDesc))); setFetchTask(createFetchTask(showLocksDesc.getSchema())); // Need to initialize the lock manager @@ -2881,8 +2880,7 @@ private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), dbName, isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showLocksDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showLocksDesc))); setFetchTask(createFetchTask(showLocksDesc.getSchema())); // Need to initialize the lock manager @@ -3008,8 +3006,8 @@ private void analyzeLockTable(ASTNode ast) */ private void analyzeShowCompactions(ASTNode ast) throws SemanticException { ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - setFetchTask(createFetchTask(desc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); + setFetchTask(createFetchTask(ShowCompactionsDesc.SCHEMA)); } /** @@ -3018,9 +3016,9 @@ private void analyzeShowCompactions(ASTNode ast) throws SemanticException { * @throws SemanticException Parsing failed. 
*/ private void analyzeShowTxns(ASTNode ast) throws SemanticException { - ShowTxnsDesc desc = new ShowTxnsDesc(ctx.getResFile()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - setFetchTask(createFetchTask(desc.getSchema())); + ShowTransactionsDesc desc = new ShowTransactionsDesc(ctx.getResFile()); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); + setFetchTask(createFetchTask(ShowTransactionsDesc.SCHEMA)); } /** @@ -3034,8 +3032,8 @@ private void analyzeAbortTxns(ASTNode ast) throws SemanticException { for (int i = 0; i < numChildren; i++) { txnids.add(Long.parseLong(ast.getChild(i).getText())); } - AbortTxnsDesc desc = new AbortTxnsDesc(txnids); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + AbortTransactionsDesc desc = new AbortTransactionsDesc(txnids); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } /** @@ -3050,8 +3048,8 @@ private void analyzeKillQuery(ASTNode ast) throws SemanticException { queryIds.add(stripQuotes(ast.getChild(i).getText())); } addServiceOutput(); - KillQueryDesc desc = new KillQueryDesc(queryIds); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + KillQueriesDesc desc = new KillQueriesDesc(queryIds); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void addServiceOutput() throws SemanticException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 07b40c996f..38d9940bd0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index c4e6e5cc53..1c6d4ac897 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.QueryProperties; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.GroupByOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java index 4c1e2a27cb..18de383e5f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java @@ -30,8 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import 
org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.metadata.Table; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 0e58fe20b4..5bb17d556a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -101,10 +101,10 @@ import org.apache.hadoop.hive.ql.cache.results.CacheUsage; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; -import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.misc.PreInsertTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 67d27cdd87..fa70820934 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.alter.AlterMaterializedViewRewriteDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.AlterMaterializedViewRewriteDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index edef74e618..b1f820ce40 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index 05a9f9123f..43a976cacd 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index 5ef66fafa4..18531266d3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java deleted file mode 100644 index ce7589492f..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -@Explain(displayName = "Create/Drop Trigger to pool mappings", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateOrDropTriggerToPoolMappingDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 383046258694558029L; - - private String resourcePlanName; - private String triggerName; - private String poolPath; - private boolean isUnmanagedPool; - private boolean drop; - - public CreateOrDropTriggerToPoolMappingDesc() {} - - public CreateOrDropTriggerToPoolMappingDesc(String resourcePlanName, String triggerName, - String poolPath, boolean drop, boolean isUnmanagedPool) { - this.resourcePlanName = resourcePlanName; - this.triggerName = triggerName; - this.poolPath = poolPath; - this.isUnmanagedPool = isUnmanagedPool; - this.drop = drop; - } - - @Explain(displayName = "resourcePlanName", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getResourcePlanName() { - return resourcePlanName; - } - - public void setResourcePlanName(String resourcePlanName) { - this.resourcePlanName = resourcePlanName; - } - - @Explain(displayName = "Trigger name", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTriggerName() { - return triggerName; - } - - public void setTriggerName(String triggerName) { - this.triggerName = triggerName; - } - - @Explain(displayName = "Pool path", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPoolPathForExplain() { - return isUnmanagedPool ? "" : poolPath; - } - - public String getPoolPath() { - return poolPath; - } - - public boolean isUnmanagedPool() { - return isUnmanagedPool; - } - - public void setPoolPath(String poolPath) { - this.poolPath = poolPath; - } - - @Explain(displayName = "drop or create", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public boolean shouldDrop() { - return drop; - } - - public void setDrop(boolean drop) { - this.drop = drop; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 1901defa27..0505e07db4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -37,10 +37,6 @@ private DropPartitionDesc dropPartitionDesc; private AlterTableDesc alterTblDesc; private ShowColumnsDesc showColumnsDesc; - private ShowLocksDesc showLocksDesc; - private ShowCompactionsDesc showCompactionsDesc; - private ShowTxnsDesc showTxnsDesc; - private AbortTxnsDesc abortTxnsDesc; private ShowPartitionsDesc showPartsDesc; private AddPartitionDesc addPartitionDesc; private RenamePartitionDesc renamePartitionDesc; @@ -48,7 +44,6 @@ private MsckDesc msckDesc; private AlterTableAlterPartDesc alterTableAlterPartDesc; private AlterTableExchangePartition alterTableExchangePartition; - private KillQueryDesc killQueryDesc; private ShowConfDesc showConfDesc; @@ -108,34 +103,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, this.showColumnsDesc = showColumnsDesc; } - /** - * @param showLocksDesc - */ - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, - ShowLocksDesc showLocksDesc) { - this(inputs, outputs); - - this.showLocksDesc = showLocksDesc; - } - - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, - ShowCompactionsDesc showCompactionsDesc) { - this(inputs, outputs); - this.showCompactionsDesc =
showCompactionsDesc; - } - - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, - ShowTxnsDesc showTxnsDesc) { - this(inputs, outputs); - this.showTxnsDesc = showTxnsDesc; - } - - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, - AbortTxnsDesc abortTxnsDesc) { - this(inputs, outputs); - this.abortTxnsDesc = abortTxnsDesc; - } - /** * @param showPartsDesc */ @@ -218,12 +185,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, this.insertCommitHookDesc = insertCommitHookDesc; } - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, - KillQueryDesc killQueryDesc) { - this(inputs, outputs); - this.killQueryDesc = killQueryDesc; - } - public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc) { this(inputs, outputs); @@ -254,34 +215,6 @@ public ShowColumnsDesc getShowColumnsDesc() { return showColumnsDesc; } - /** - * @return the showLocksDesc - */ - @Explain(displayName = "Show Lock Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowLocksDesc getShowLocksDesc() { - return showLocksDesc; - } - - @Explain(displayName = "Show Compactions Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowCompactionsDesc getShowCompactionsDesc() { - return showCompactionsDesc; - } - - @Explain(displayName = "Show Transactions Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowTxnsDesc getShowTxnsDesc() { - return showTxnsDesc; - } - - @Explain(displayName = "Abort Transactions Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public AbortTxnsDesc getAbortTxnsDesc() { - return abortTxnsDesc; - } - - @Explain(displayName = "Kill Query Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public KillQueryDesc getKillQueryDesc() { - return killQueryDesc; - } - /** * @return the showPartsDesc */ diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index dd3af1b35c..19534d1954 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.ddl.DDLWork2; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 5d4e93e74a..7e5e72a293 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -21,8 +21,8 @@ import java.io.Serializable; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils;
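The DDLWork.java hunk above shows the cost model this refactor removes: the legacy work object held one nullable field, one constructor overload, and one @Explain getter per command, and DDLTask probed each getter in turn at execution time. Commands migrated in this patch travel through DDLWork2 instead, which needs only a single generic desc slot. DDLWork2's source is not in these hunks; the sketch below condenses the shape implied by its call sites here (new DDLWork2(getInputs(), getOutputs(), desc)) — field and accessor names are assumptions:

    import java.io.Serializable;
    import java.util.HashSet;

    import org.apache.hadoop.hive.ql.ddl.DDLDesc;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;

    /** Sketch only: DDLWork2 as implied by its call sites in this patch. */
    public class DDLWork2 implements Serializable {
      private static final long serialVersionUID = 1L;

      private HashSet<ReadEntity> inputs;
      private HashSet<WriteEntity> outputs;
      private DDLDesc ddlDesc; // one generic slot instead of one field per command

      public DDLWork2(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, DDLDesc ddlDesc) {
        this.inputs = inputs;
        this.outputs = outputs;
        this.ddlDesc = ddlDesc;
      }

      public DDLDesc getDDLDesc() {
        return ddlDesc;
      }
    }

Under this split, adding a DDL command no longer touches DDLWork or DDLTask at all: a Desc/Operation pair in its own package plus the analyzer wiring is sufficient.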
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 15e922ebd5..b1a04b3a50 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.CreateViewDesc; +import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.TableScanOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java deleted file mode 100644 index 2974cd9ac0..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.HashMap; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowLocksDesc. - * - */ -@Explain(displayName = "Show Locks", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowLocksDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String resFile; - String dbName; - String tableName; - HashMap<String, String> partSpec; - boolean isExt; - boolean isNewLockFormat; - - /** - * table name for the result of show locks. - */ - private static final String table = "showlocks"; - /** - * thrift ddl for the result of show locks. - */ - private static final String schema = "tab_name,mode#string:string"; - - /** - * Schema for use with db txn manager.
- */ - private static final String newFormatSchema = "lockid,database,table,partition,lock_state," + - "blocked_by,lock_type,transaction_id,last_heartbeat,acquired_at,user,hostname,agent_info#" + - "string:string:string:string:string:string:string:string:string:string:string:string:string"; - - public String getDatabase() { - return dbName; - } - - public String getTable() { - return table; - } - - public String getSchema() { - if (isNewLockFormat) return newFormatSchema; - else return schema; - } - - public ShowLocksDesc() { - } - - /** - * @param resFile - */ - public ShowLocksDesc(Path resFile, String dbName, boolean isExt, boolean isNewFormat) { - this.resFile = resFile.toString(); - this.partSpec = null; - this.tableName = null; - this.isExt = isExt; - this.dbName = dbName; - isNewLockFormat = isNewFormat; - } - - /** - * @param resFile - */ - public ShowLocksDesc(Path resFile, String tableName, - HashMap<String, String> partSpec, boolean isExt, boolean isNewFormat) { - this.resFile = resFile.toString(); - this.partSpec = partSpec; - this.tableName = tableName; - this.isExt = isExt; - isNewLockFormat = isNewFormat; - } - - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return the tableName - */ - @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the partSpec - */ - @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public HashMap<String, String> getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpecs(HashMap<String, String> partSpec) { - this.partSpec = partSpec; - } - - /** - * @return the resFile - */ - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the isExt - */ - public boolean isExt() { - return isExt; - } - - /** - * @param isExt - * the isExt to set - */ - public void setExt(boolean isExt) { - this.isExt = isExt; - } - - public boolean isNewFormat() { - return isNewLockFormat; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index aa12ddb6af..97ef823bfd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.ql.DriverUtils; -import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableOperation; +import org.apache.hadoop.hive.ql.ddl.table.creation.ShowCreateTableOperation; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java index ae22b7f47a..5f733fc818 100644 ---
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.ddl.DDLTask2;
 import org.apache.hadoop.hive.ql.ddl.DDLWork2;
-import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Assert;
 import org.junit.Test;
diff --git ql/src/test/queries/clientpositive/dbtxnmgr_abort.q ql/src/test/queries/clientpositive/dbtxnmgr_abort.q
index d9239958f0..da23e09eab 100644
--- ql/src/test/queries/clientpositive/dbtxnmgr_abort.q
+++ ql/src/test/queries/clientpositive/dbtxnmgr_abort.q
@@ -1,6 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
+explain abort transactions 1234 5678;
 abort transactions 1234 5678;
 
 abort transactions 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010;
diff --git ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
index 24a42ea7bc..4f29e4b808 100644
--- ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
+++ ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
@@ -7,10 +7,12 @@
 show locks extended;
 show locks default;
 
+explain show transactions;
 show transactions;
 
 create table partitioned_acid_table (a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true');
 
+explain show locks database default;
 show locks database default;
 
 show locks partitioned_acid_table;
@@ -19,12 +21,14 @@
 show locks partitioned_acid_table extended;
 
 show locks partitioned_acid_table partition (p='abc');
 
+explain show locks partitioned_acid_table partition (p='abc') extended;
 show locks partitioned_acid_table partition (p='abc') extended;
 
 insert into partitioned_acid_table partition(p='abc') values(1,2);
 
 alter table partitioned_acid_table partition(p='abc') compact 'minor';
 
+explain show compactions;
 show compactions;
 drop table partitioned_acid_table;
diff --git ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
index aaf1c7835f..03c6724ec2 100644
--- ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
+++ ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
@@ -13,6 +13,24 @@ PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks default
 POSTHOOK: type: SHOWLOCKS
 Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Heartbeat	Acquired At	User	Hostname	Agent Info
+PREHOOK: query: explain show transactions
+PREHOOK: type: SHOW TRANSACTIONS
+POSTHOOK: query: explain show transactions
+POSTHOOK: type: SHOW TRANSACTIONS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Transactions
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: show transactions
 PREHOOK: type: SHOW TRANSACTIONS
 POSTHOOK: query: show transactions
@@ -26,6 +44,25 @@ POSTHOOK: query: create table partitioned_acid_table (a int, b int) partitioned
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@partitioned_acid_table
+PREHOOK: query: explain show locks database default
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: explain show locks database default
+POSTHOOK: type: SHOWLOCKS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Locks
+      dbName: default
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: show locks database default
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks database default
@@ -46,6 +83,27 @@ PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks partitioned_acid_table partition (p='abc')
 POSTHOOK: type: SHOWLOCKS
 Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Heartbeat	Acquired At	User	Hostname	Agent Info
+PREHOOK: query: explain show locks partitioned_acid_table partition (p='abc') extended
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: explain show locks partitioned_acid_table partition (p='abc') extended
+POSTHOOK: type: SHOWLOCKS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Locks
+      partition:
+        p abc
+      table: partitioned_acid_table
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
@@ -65,6 +123,24 @@ PREHOOK: query: alter table partitioned_acid_table partition(p='abc') compact 'm
 PREHOOK: type: ALTERTABLE_COMPACT
 POSTHOOK: query: alter table partitioned_acid_table partition(p='abc') compact 'minor'
 POSTHOOK: type: ALTERTABLE_COMPACT
+PREHOOK: query: explain show compactions
+PREHOOK: type: SHOW COMPACTIONS
+POSTHOOK: query: explain show compactions
+POSTHOOK: type: SHOW COMPACTIONS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Compactions
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: show compactions
 PREHOOK: type: SHOW COMPACTIONS
 POSTHOOK: query: show compactions
diff --git ql/src/test/results/clientpositive/kill_query.q.out ql/src/test/results/clientpositive/kill_query.q.out
index 32c3f6ac57..239706f663 100644
--- ql/src/test/results/clientpositive/kill_query.q.out
+++ ql/src/test/results/clientpositive/kill_query.q.out
@@ -8,9 +8,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-    Kill Query Operator:
-      Kill Query
-        Query IDs: query_1244656
+    Kill Query
+      Query IDs: query_1244656
 
 PREHOOK: query: explain kill query 'query_123456677' 'query_1238503495'
 PREHOOK: type: KILL QUERY
@@ -22,9 +21,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-    Kill Query Operator:
-      Kill Query
-        Query IDs: query_123456677, query_1238503495
+    Kill Query
+      Query IDs: query_123456677, query_1238503495
 
 PREHOOK: query: kill query 'query_1244656'
 PREHOOK: type: KILL QUERY
diff --git ql/src/test/results/clientpositive/llap/kill_query.q.out ql/src/test/results/clientpositive/llap/kill_query.q.out
index 32c3f6ac57..239706f663 100644
--- ql/src/test/results/clientpositive/llap/kill_query.q.out
+++ ql/src/test/results/clientpositive/llap/kill_query.q.out
@@ -8,9 +8,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-    Kill Query Operator:
-      Kill Query
-        Query IDs: query_1244656
+    Kill Query
+      Query IDs: query_1244656
 
 PREHOOK: query: explain kill query 'query_123456677' 'query_1238503495'
 PREHOOK: type: KILL QUERY
@@ -22,9 +21,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-    Kill Query Operator:
-      Kill Query
-        Query IDs: query_123456677, query_1238503495
+    Kill Query
+      Query IDs: query_123456677, query_1238503495
 
 PREHOOK: query: kill query 'query_1244656'
 PREHOOK: type: KILL QUERY
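
Note on the schema strings in the ShowLocksDesc removal above: the descriptor encodes each result schema as a single "names#types" string, with comma-separated column names before the '#' and colon-separated column types after it; the new-format string is what yields the thirteen-column header (Lock ID, Database, Table, ..., Agent Info) visible in dbtxnmgr_showlocks.q.out. The sketch below is only a minimal illustration of that encoding; LockSchemaDemo and everything in it is hypothetical and not part of this patch.

// Hypothetical demo -- not part of the patch; it only illustrates the
// "names#types" schema-string convention used by ShowLocksDesc above.
public class LockSchemaDemo {
  private static final String NEW_FORMAT_SCHEMA =
      "lockid,database,table,partition,lock_state,"
          + "blocked_by,lock_type,transaction_id,last_heartbeat,acquired_at,user,hostname,agent_info#"
          + "string:string:string:string:string:string:string:string:string:string:string:string:string";

  public static void main(String[] args) {
    // The '#' splits the comma-delimited column names from the colon-delimited types.
    String[] parts = NEW_FORMAT_SCHEMA.split("#", 2);
    String[] names = parts[0].split(",");
    String[] types = parts[1].split(":");
    // Both arrays have 13 entries here, matching the 13 columns that
    // SHOW LOCKS prints in dbtxnmgr_showlocks.q.out.
    for (int i = 0; i < names.length; i++) {
      System.out.println(names[i] + "\t" + types[i]);
    }
  }
}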