diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java deleted file mode 100644 index 2df4a427d5..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for ALTER RESOURCE PLAN commands. - */ -@Explain(displayName = "Alter Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterResourcePlanDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = -3514685833183437279L; - - public static final String SCHEMA = "error#string"; - - private final WMNullableResourcePlan resourcePlan; - private final String planName; - private final boolean validate; - private final boolean isEnableActivate; - private final boolean isForceDeactivate; - private final boolean isReplace; - private final String resFile; - - public AlterResourcePlanDesc(WMNullableResourcePlan resourcePlan, String planName, boolean validate, - boolean isEnableActivate, boolean isForceDeactivate, boolean isReplace, String resFile) { - this.resourcePlan = resourcePlan; - this.planName = planName; - this.validate = validate; - this.isEnableActivate = isEnableActivate; - this.isForceDeactivate = isForceDeactivate; - this.isReplace = isReplace; - this.resFile = resFile; - } - - @Explain(displayName="Resource plan changed fields", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMNullableResourcePlan getResourcePlan() { - return resourcePlan; - } - - @Explain(displayName="Resource plan to modify", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; - } - - @Explain(displayName="shouldValidate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public boolean shouldValidate() { - return validate; - } - - public boolean isEnableActivate() { - return isEnableActivate; - } - - public boolean isForceDeactivate() { - return isForceDeactivate; - } - - public boolean isReplace() { - return isReplace; - } - - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java deleted file mode 100644 index 9f259dab94..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for ALTER ... MAPPING commands. - */ -@Explain(displayName = "Alter Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterWMMappingDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = -442968568922083053L; - - private final WMMapping mapping; - - public AlterWMMappingDesc(WMMapping mapping) { - this.mapping = mapping; - } - - @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMMapping getMapping() { - return mapping; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java deleted file mode 100644 index 20f14ab0d6..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for ALTER POOL commands. 
- */ -@Explain(displayName = "Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterWMPoolDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = 4872940135771213510L; - - private final WMNullablePool pool; - private final String poolPath; - - public AlterWMPoolDesc(WMNullablePool pool, String poolPath) { - this.pool = pool; - this.poolPath = poolPath; - } - - @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMNullablePool getPool() { - return pool; - } - - @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPoolPath() { - return poolPath; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java deleted file mode 100644 index 9bc25161fc..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for ALTER TRIGGER commands. - */ -@Explain(displayName="Alter WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterWMTriggerDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = -2105736261687539210L; - - private final WMTrigger trigger; - - public AlterWMTriggerDesc(WMTrigger trigger) { - this.trigger = trigger; - } - - @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMTrigger getTrigger() { - return trigger; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java deleted file mode 100644 index 16e89068e0..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for CREATE ... MAPPING commands. - */ -@Explain(displayName = "Create Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateWMMappingDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = -442968568922083053L; - - private final WMMapping mapping; - - public CreateWMMappingDesc(WMMapping mapping) { - this.mapping = mapping; - } - - @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMMapping getMapping() { - return mapping; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java deleted file mode 100644 index 48ca2f5bbe..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DDL task description for CREATE TRIGGER commands. 
- */ -@Explain(displayName="Create WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateWMTriggerDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = 1705317739121300923L; - - private final WMTrigger trigger; - - public CreateWMTriggerDesc(WMTrigger trigger) { - this.trigger = trigger; - } - - @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMTrigger getTrigger() { - return trigger; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java index 4860ee7dcd..ea40f776ec 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java @@ -18,23 +18,24 @@ package org.apache.hadoop.hive.ql.ddl.workloadmanagement; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; /** * Common utilities for Workload Management related ddl operations. */ -final class WMUtils { +public final class WMUtils { private WMUtils() { throw new UnsupportedOperationException("WMUtils should not be instantiated"); } - static void validateTrigger(WMTrigger trigger) throws HiveException { - try { - ExecutionTrigger.fromWMTrigger(trigger); - } catch (IllegalArgumentException e) { - throw new HiveException(e); + public static String poolPath(Tree root) { + StringBuilder builder = new StringBuilder(); + builder.append(BaseSemanticAnalyzer.unescapeIdentifier(root.getText())); + for (int i = 0; i < root.getChildCount(); ++i) { + // DOT is not affected + builder.append(BaseSemanticAnalyzer.unescapeIdentifier(root.getChild(i).getText())); } + return builder.toString(); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/AbstractVMMappingAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/AbstractVMMappingAnalyzer.java new file mode 100644 index 0000000000..75deade8f8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/AbstractVMMappingAnalyzer.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.PlanUtils; + +/** + * Abstract ancestor of Create and Alter WM Mapping analyzers. + */ +public abstract class AbstractVMMappingAnalyzer extends BaseSemanticAnalyzer { + public AbstractVMMappingAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() < 4 || root.getChildCount() > 5) { + throw new SemanticException("Invalid syntax for create or alter mapping."); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String entityType = root.getChild(1).getText(); + String entityName = PlanUtils.stripQuotes(root.getChild(2).getText()); + String poolPath = root.getChild(3).getType() == HiveParser.TOK_UNMANAGED ? + null : WMUtils.poolPath(root.getChild(3)); // Null path => unmanaged + Integer ordering = root.getChildCount() == 5 ? Integer.valueOf(root.getChild(4).getText()) : null; + + DDLDesc desc = getDesc(resourcePlanName, entityType, entityName, poolPath, ordering); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } + + protected abstract DDLDesc getDesc(String resourcePlanName, String entityType, String entityName, String poolPath, + Integer ordering); +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingAnalyzer.java new file mode 100644 index 0000000000..0a2c78408f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingAnalyzer.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
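For orientation, the shared analyzer above expects the child layout of Hive's WM mapping DDL: child 0 is the resource plan name, child 1 the entity type, child 2 the quoted entity name, child 3 a pool path or TOK_UNMANAGED, and the optional child 4 the ordering. Illustrative statements, with placeholder names, shown as comments:

    // CREATE USER MAPPING 'bob' IN plan1 TO default.c1;             -- 4 children
    // ALTER USER MAPPING 'bob' IN plan1 TO default.c1 WITH ORDER 1; -- 5 children, ordering = 1
    // ALTER USER MAPPING 'bob' IN plan1 UNMANAGED;                  -- TOK_UNMANAGED, null pool path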
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.alter; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.AbstractVMMappingAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter mapping commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_MAPPING) +public class AlterWMMappingAnalyzer extends AbstractVMMappingAnalyzer { + public AlterWMMappingAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + protected DDLDesc getDesc(String resourcePlanName, String entityType, String entityName, String poolPath, + Integer ordering) { + return new AlterWMMappingDesc(resourcePlanName, entityType, entityName, poolPath, ordering); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingDesc.java new file mode 100644 index 0000000000..6378ddb1db --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingDesc.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.alter; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER ... MAPPING commands. 
+ */ +@Explain(displayName = "Alter Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMMappingDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final String entityType; + private final String entityName; + private final String poolPath; + private final Integer ordering; + + public AlterWMMappingDesc(String resourcePlanName, String entityType, String entityName, String poolPath, + Integer ordering) { + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; + this.poolPath = poolPath; + this.ordering = ordering; + } + + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Entity type", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityType() { + return entityType; + } + + @Explain(displayName = "Entity name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityName() { + return entityName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + @Explain(displayName = "Ordering", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getOrdering() { + return ordering; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingOperation.java similarity index 75% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingOperation.java index 513e0c906c..8aad92de49 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/AlterWMMappingOperation.java @@ -16,10 +16,11 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.alter; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -34,7 +35,13 @@ public AlterWMMappingOperation(DDLOperationContext context, AlterWMMappingDesc d @Override public int execute() throws HiveException, IOException { - context.getDb().createOrUpdateWMMapping(desc.getMapping(), true); + WMMapping mapping = new WMMapping(desc.getResourcePlanName(), desc.getEntityType(), desc.getEntityName()); + mapping.setPoolPath(desc.getPoolPath()); + if (desc.getOrdering() != null) { + mapping.setOrdering(desc.getOrdering()); + } + + context.getDb().createOrUpdateWMMapping(mapping, true); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java new file mode 100644 index 0000000000..bee2b391ff --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/alter/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter WM Mapping DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.alter; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingAnalyzer.java new file mode 100644 index 0000000000..065f020d1a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingAnalyzer.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
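The pattern above recurs throughout this patch: the desc classes now carry plain serializable strings instead of Thrift objects, and each operation rebuilds the metastore object at execute time. A minimal sketch of the resulting calls, using only constructors and signatures visible in this diff (db stands for the Hive object returned by context.getDb(); the names are placeholders):

    WMMapping mapping = new WMMapping("plan1", "USER", "bob"); // plan name, entity type, entity name
    mapping.setPoolPath("default.c1");                         // left null for unmanaged mappings
    db.createOrUpdateWMMapping(mapping, true);                 // true = update (ALTER), false = create (CREATE)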
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.create; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.AbstractVMMappingAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for create mapping commands. + */ +@DDLType(type=HiveParser.TOK_CREATE_MAPPING) +public class CreateWMMappingAnalyzer extends AbstractVMMappingAnalyzer { + public CreateWMMappingAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + protected DDLDesc getDesc(String resourcePlanName, String entityType, String entityName, String poolPath, + Integer ordering) { + return new CreateWMMappingDesc(resourcePlanName, entityType, entityName, poolPath, ordering); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingDesc.java new file mode 100644 index 0000000000..e629de0bfc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingDesc.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.create; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for CREATE ... MAPPING commands. 
+ */ +@Explain(displayName = "Create Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMMappingDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final String entityType; + private final String entityName; + private final String poolPath; + private final Integer ordering; + + public CreateWMMappingDesc(String resourcePlanName, String entityType, String entityName, String poolPath, + Integer ordering) { + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; + this.poolPath = poolPath; + this.ordering = ordering; + } + + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Entity type", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityType() { + return entityType; + } + + @Explain(displayName = "Entity name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityName() { + return entityName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + @Explain(displayName = "Ordering", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getOrdering() { + return ordering; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingOperation.java similarity index 75% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingOperation.java index b0c16e6a92..bfd6425695 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/CreateWMMappingOperation.java @@ -16,10 +16,11 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.create; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -34,7 +35,13 @@ public CreateWMMappingOperation(DDLOperationContext context, CreateWMMappingDesc @Override public int execute() throws HiveException, IOException { - context.getDb().createOrUpdateWMMapping(desc.getMapping(), false); + WMMapping mapping = new WMMapping(desc.getResourcePlanName(), desc.getEntityType(), desc.getEntityName()); + mapping.setPoolPath(desc.getPoolPath()); + if (desc.getOrdering() != null) { + mapping.setOrdering(desc.getOrdering()); + } + + context.getDb().createOrUpdateWMMapping(mapping, false); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java new file mode 100644 index 0000000000..12c69f67a0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/create/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Create WM Mapping DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.create; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingAnalyzer.java new file mode 100644 index 0000000000..6baf8e1502 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingAnalyzer.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.drop; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.PlanUtils; + +/** + * Analyzer for drop mapping commands. + */ +@DDLType(type=HiveParser.TOK_DROP_MAPPING) +public class DropWMMappingAnalyzer extends BaseSemanticAnalyzer { + public DropWMMappingAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode ast) throws SemanticException { + if (ast.getChildCount() != 3) { + throw new SemanticException("Invalid syntax for drop mapping."); + } + + String resourcePlanName = unescapeIdentifier(ast.getChild(0).getText()); + String entityType = ast.getChild(1).getText(); + String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText()); + + DropWMMappingDesc desc = new DropWMMappingDesc(resourcePlanName, entityType, entityName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingDesc.java similarity index 56% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingDesc.java index 56a6852ee8..f27c5492c9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingDesc.java @@ -16,11 +16,10 @@ * limitations under the License. 
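DropWMMappingAnalyzer above is the degenerate case of the same layout: exactly three children (plan name, entity type, entity name), as produced by a statement of the shape DROP USER MAPPING 'bob' IN plan1; (placeholder names). The operation further down rebuilds the WMMapping from those three fields alone, since dropping needs neither pool path nor ordering.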
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.drop; import java.io.Serializable; -import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -30,16 +29,30 @@ */ @Explain(displayName = "Drop mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class DropWMMappingDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = -1567558687529244218L; + private static final long serialVersionUID = 1L; - private final WMMapping mapping; + private final String resourcePlanName; + private final String entityType; + private final String entityName; - public DropWMMappingDesc(WMMapping mapping) { - this.mapping = mapping; + public DropWMMappingDesc(String resourcePlanName, String entityType, String entityName) { + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; } - @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMMapping getMapping() { - return mapping; + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Entity type", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityType() { + return entityType; + } + + @Explain(displayName = "Entity name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityName() { + return entityName; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingOperation.java similarity index 82% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingOperation.java index 508ec4841c..7a5e0728bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/DropWMMappingOperation.java @@ -16,10 +16,11 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.drop; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -34,7 +35,8 @@ public DropWMMappingOperation(DDLOperationContext context, DropWMMappingDesc des @Override public int execute() throws HiveException, IOException { - context.getDb().dropWMMapping(desc.getMapping()); + WMMapping mapping = new WMMapping(desc.getResourcePlanName(), desc.getEntityType(), desc.getEntityName()); + context.getDb().dropWMMapping(mapping); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java new file mode 100644 index 0000000000..c10903e227 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/drop/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Drop WM Mapping DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping.drop; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java new file mode 100644 index 0000000000..2d85cef902 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/mapping/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** WM Mapping DDL operations. 
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.mapping; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolAnalyzer.java new file mode 100644 index 0000000000..051136a80d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolAnalyzer.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.alter; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.PlanUtils; + +/** + * Analyzer for alter pool commands. 
+ */ +@DDLType(type=HiveParser.TOK_ALTER_POOL) +public class AlterWMPoolAnalyzer extends BaseSemanticAnalyzer { + public AlterWMPoolAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() < 3) { + throw new SemanticException("Invalid syntax for alter pool: " + root.toStringTree()); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String poolPath = WMUtils.poolPath(root.getChild(1)); + Double allocFraction = null; + Integer queryParallelism = null; + String schedulingPolicy = null; + boolean removeSchedulingPolicy = false; + String newPath = null; + + for (int i = 2; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + if (child.getChildCount() != 1) { + throw new SemanticException("Invalid syntax in alter pool: expected a parameter."); + } + Tree param = child.getChild(0); + switch (child.getType()) { + case HiveParser.TOK_ALLOC_FRACTION: + allocFraction = Double.parseDouble(param.getText()); + break; + case HiveParser.TOK_QUERY_PARALLELISM: + queryParallelism = Integer.parseInt(param.getText()); + break; + case HiveParser.TOK_SCHEDULING_POLICY: + if (param.getType() != HiveParser.TOK_NULL) { + schedulingPolicy = PlanUtils.stripQuotes(param.getText()); + } else { + removeSchedulingPolicy = true; + } + break; + case HiveParser.TOK_PATH: + newPath = WMUtils.poolPath(param); + break; + default: + throw new SemanticException("Incorrect alter syntax: " + child.toStringTree()); + } + } + + AlterWMPoolDesc desc = new AlterWMPoolDesc(resourcePlanName, poolPath, allocFraction, queryParallelism, + schedulingPolicy, removeSchedulingPolicy, newPath); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolDesc.java new file mode 100644 index 0000000000..e3c769b311 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolDesc.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.alter; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER POOL commands. 
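The alter-pool analyzer above accepts any subset of the settable fields, so ALTER POOL plan1.default.c1 SET ALLOC_FRACTION = 0.75, QUERY_PARALLELISM = 3; (placeholder names) fills only those two, while UNSET SCHEDULING_POLICY arrives as TOK_NULL and flips removeSchedulingPolicy. The operation later in this diff maps that onto WMNullablePool, whose explicit is-set flag is what allows a null to clear the stored policy; a minimal sketch using only calls visible in this patch:

    WMNullablePool pool = new WMNullablePool("plan1", "default.c1");
    pool.setIsSetSchedulingPolicy(true); // mark the field as deliberately set...
    pool.setSchedulingPolicy(null);      // ...so the null clears the policy instead of being ignored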
+ */ +@Explain(displayName = "Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMPoolDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + private final String resourcePlanName; + private final String poolPath; + private final Double allocFraction; + private final Integer queryParallelism; + private final String schedulingPolicy; + private final boolean removeSchedulingPolicy; + private final String newPath; + + public AlterWMPoolDesc(String resourcePlanName, String poolPath, Double allocFraction, Integer queryParallelism, + String schedulingPolicy, boolean removeSchedulingPolicy, String newPath) { + this.resourcePlanName = resourcePlanName; + this.poolPath = poolPath; + this.allocFraction = allocFraction; + this.queryParallelism = queryParallelism; + this.schedulingPolicy = schedulingPolicy; + this.removeSchedulingPolicy = removeSchedulingPolicy; + this.newPath = newPath; + } + + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + @Explain(displayName = "Alloc fraction", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Double getAllocFraction() { + return allocFraction; + } + + @Explain(displayName = "Query parallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getQueryParallelism() { + return queryParallelism; + } + + @Explain(displayName = "Scheduling policy", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getSchedulingPolicy() { + return schedulingPolicy; + } + + @Explain(displayName = "Remove scheduling policy", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue=true) + public boolean isRemoveSchedulingPolicy() { + return removeSchedulingPolicy; + } + + @Explain(displayName = "New path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getNewPath() { + return newPath; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolOperation.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolOperation.java index 059d407bbd..3ba8944548 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/AlterWMPoolOperation.java @@ -16,10 +16,11 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.alter; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMNullablePool; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -34,7 +35,22 @@ public AlterWMPoolOperation(DDLOperationContext context, AlterWMPoolDesc desc) { @Override public int execute() throws HiveException, IOException { - context.getDb().alterWMPool(desc.getPool(), desc.getPoolPath()); + WMNullablePool pool = new WMNullablePool(desc.getResourcePlanName(), desc.getPoolPath()); + if (desc.getAllocFraction() != null) { + pool.setAllocFraction(desc.getAllocFraction()); + } + if (desc.getQueryParallelism() != null) { + pool.setQueryParallelism(desc.getQueryParallelism()); + } + if (desc.getSchedulingPolicy() != null || desc.isRemoveSchedulingPolicy()) { + pool.setIsSetSchedulingPolicy(true); + pool.setSchedulingPolicy(desc.getSchedulingPolicy()); + } + if (desc.getNewPath() != null) { + pool.setPoolPath(desc.getNewPath()); + } + + context.getDb().alterWMPool(pool, desc.getPoolPath()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java new file mode 100644 index 0000000000..723774b9f2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/alter/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Pool DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.alter; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolAnalyzer.java new file mode 100644 index 0000000000..0246be0761 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolAnalyzer.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.PlanUtils; + +/** + * Analyzer for create pool commands. + */ +@DDLType(type=HiveParser.TOK_CREATE_POOL) +public class CreateWMPoolAnalyzer extends BaseSemanticAnalyzer { + public CreateWMPoolAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + // TODO: allow defaults for e.g. scheduling policy. + if (root.getChildCount() < 3) { + throw new SemanticException("Expected more arguments: " + root.toStringTree()); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String poolPath = WMUtils.poolPath(root.getChild(1)); + Double allocFraction = null; + Integer queryParallelism = null; + String schedulingPolicy = null; + + for (int i = 2; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + if (child.getChildCount() != 1) { + throw new SemanticException("Expected 1 parameter for: " + child.getText()); + } + + String param = child.getChild(0).getText(); + switch (child.getType()) { + case HiveParser.TOK_ALLOC_FRACTION: + allocFraction = Double.parseDouble(param); + break; + case HiveParser.TOK_QUERY_PARALLELISM: + queryParallelism = Integer.parseInt(param); + break; + case HiveParser.TOK_SCHEDULING_POLICY: + schedulingPolicy = PlanUtils.stripQuotes(param); + break; + case HiveParser.TOK_PATH: + throw new SemanticException("Invalid parameter path in create pool"); + default: + throw new SemanticException("Invalid parameter " + child.getText() + " in create pool"); + } + } + + if (allocFraction == null) { + throw new SemanticException("alloc_fraction should be specified for a pool"); + } + if (queryParallelism == null) { + throw new SemanticException("query_parallelism should be specified for a pool"); + } + + CreateWMPoolDesc desc = new CreateWMPoolDesc(resourcePlanName, poolPath, allocFraction, queryParallelism, + schedulingPolicy); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolDesc.java new file mode 100644 index 0000000000..cfc81a5776 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolDesc.java @@ 
-0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for CREATE POOL commands. + */ +@Explain(displayName = "Create Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMPoolDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + private final String resourcePlanName; + private final String poolPath; + private final double allocFraction; + private final int queryParallelism; + private final String schedulingPolicy; + + public CreateWMPoolDesc(String resourcePlanName, String poolPath, double allocFraction, int queryParallelism, + String schedulingPolicy) { + this.resourcePlanName = resourcePlanName; + this.poolPath = poolPath; + this.allocFraction = allocFraction; + this.queryParallelism = queryParallelism; + this.schedulingPolicy = schedulingPolicy; + } + + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + @Explain(displayName = "Alloc fraction", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public double getAllocFraction() { + return allocFraction; + } + + @Explain(displayName = "Query parallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public int getQueryParallelism() { + return queryParallelism; + } + + @Explain(displayName = "Scheduling policy", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getSchedulingPolicy() { + return schedulingPolicy; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolOperation.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolOperation.java index 5298535270..c523e791f5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/CreateWMPoolOperation.java @@ -16,13 +16,16 @@ * limitations under the License. 
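As a reference for reviewers, here is a minimal sketch of the desc that CreateWMPoolAnalyzer above would build for a typical statement. The class name, statement text, and values are hypothetical illustrations, not part of the patch:

```java
import org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create.CreateWMPoolDesc;

public class CreateWMPoolDescExample {
  public static void main(String[] args) {
    // Hypothetical statement: CREATE POOL plan1.bi WITH
    //   ALLOC_FRACTION = 0.4, QUERY_PARALLELISM = 3, SCHEDULING_POLICY = 'fair';
    // The analyzer unescapes the plan name, flattens the pool path via
    // WMUtils.poolPath, and strips the quotes around the scheduling policy.
    CreateWMPoolDesc desc = new CreateWMPoolDesc("plan1", "bi", 0.4, 3, "fair");
    System.out.println(desc.getPoolPath() + "@" + desc.getResourcePlanName());
  }
}
```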
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.SemanticException; /** * Operation process of creating a workload management pool. @@ -34,7 +37,17 @@ public CreateWMPoolOperation(DDLOperationContext context, CreateWMPoolDesc desc) @Override public int execute() throws HiveException, IOException { - context.getDb().createWMPool(desc.getPool()); + WMPool pool = new WMPool(desc.getResourcePlanName(), desc.getPoolPath()); + pool.setAllocFraction(desc.getAllocFraction()); + pool.setQueryParallelism(desc.getQueryParallelism()); + if (desc.getSchedulingPolicy() != null) { + if (!MetaStoreUtils.isValidSchedulingPolicy(desc.getSchedulingPolicy())) { + throw new SemanticException("Invalid scheduling policy " + desc.getSchedulingPolicy()); + } + pool.setSchedulingPolicy(desc.getSchedulingPolicy()); + } + + context.getDb().createWMPool(pool); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java new file mode 100644 index 0000000000..5447711758 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/create/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Create Pool DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.create; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolAnalyzer.java new file mode 100644 index 0000000000..49cf48f498 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolAnalyzer.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
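The operation above now builds the Thrift-level WMPool from the plain desc fields instead of receiving a prebuilt WMPool. A standalone sketch of that mapping, with hypothetical values and class name:

```java
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class WMPoolMappingSketch {
  public static void main(String[] args) {
    // Mirrors CreateWMPoolOperation.execute(): the required fields first ...
    WMPool pool = new WMPool("plan1", "bi");
    pool.setAllocFraction(0.4);
    pool.setQueryParallelism(3);
    // ... then the optional scheduling policy, validated before it is set.
    String policy = "fair";
    if (MetaStoreUtils.isValidSchedulingPolicy(policy)) {
      pool.setSchedulingPolicy(policy);
    }
    System.out.println(pool);
  }
}
```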
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for drop pool commands. + */ +@DDLType(type=HiveParser.TOK_DROP_POOL) +public class DropWMPoolAnalyzer extends BaseSemanticAnalyzer { + public DropWMPoolAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 2) { + throw new SemanticException("Invalid syntax for drop pool."); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String poolPath = WMUtils.poolPath(root.getChild(1)); + + DropWMPoolDesc desc = new DropWMPoolDesc(resourcePlanName, poolPath); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolDesc.java similarity index 73% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolDesc.java index 755c957835..2eef340594 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolDesc.java @@ -16,7 +16,7 @@ * limitations under the License. 
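The drop analyzer is the simplest of the pool commands: exactly two AST children, the plan name and the pool path. A hypothetical illustration of the resulting desc:

```java
import org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop.DropWMPoolDesc;

public class DropWMPoolDescExample {
  public static void main(String[] args) {
    // Hypothetical statement: DROP POOL plan1.bi;
    DropWMPoolDesc desc = new DropWMPoolDesc("plan1", "bi");
    System.out.println(desc.getResourcePlanName() + "." + desc.getPoolPath());
  }
}
```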
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop; import java.io.Serializable; @@ -31,19 +31,20 @@ public class DropWMPoolDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -2608462103392563252L; - private final String planName; + private final String resourcePlanName; private final String poolPath; - public DropWMPoolDesc(String planName, String poolPath) { - this.planName = planName; + public DropWMPoolDesc(String resourcePlanName, String poolPath) { + this.resourcePlanName = resourcePlanName; this.poolPath = poolPath; } - @Explain(displayName="poolName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName="Resource plan name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } + @Explain(displayName="Pool path", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getPoolPath() { return poolPath; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolOperation.java similarity index 90% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolOperation.java index 44564c35fa..0939e790c3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/DropWMPoolOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop; import java.io.IOException; @@ -34,7 +34,7 @@ public DropWMPoolOperation(DDLOperationContext context, DropWMPoolDesc desc) { @Override public int execute() throws HiveException, IOException { - context.getDb().dropWMPool(desc.getPlanName(), desc.getPoolPath()); + context.getDb().dropWMPool(desc.getResourcePlanName(), desc.getPoolPath()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java new file mode 100644 index 0000000000..ca51de30fc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/pool/drop/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Drop Pool DDL operation. 
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.pool.drop; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/AbstractAlterResourcePlanStatusOperation.java similarity index 60% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/AbstractAlterResourcePlanStatusOperation.java index fe9e7e1c5f..eca1840bbf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/AbstractAlterResourcePlanStatusOperation.java @@ -16,86 +16,59 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter; -import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; -import org.apache.hadoop.hive.ql.ddl.DDLUtils; -import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; -import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; - -import java.io.DataOutputStream; -import java.io.IOException; import java.util.Collection; import java.util.concurrent.ExecutionException; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; +import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; import org.apache.hadoop.hive.ql.metadata.HiveException; import com.google.common.util.concurrent.ListenableFuture; /** - * Operation process of altering a resource plan. + * Abstract ancestor of the enable / disable Resource Plan operations. */ -public class AlterResourcePlanOperation extends DDLOperation<AlterResourcePlanDesc> { - // Note: the resource plan operations are going to be annotated with namespace based on the config - // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see - // that there exist other namespaces, because one HS2 always operates inside just one and we - // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about - // namespaces - Hive.java will transparently scope everything. That's the idea anyway.
- public AlterResourcePlanOperation(DDLOperationContext context, AlterResourcePlanDesc desc) { +public abstract class AbstractAlterResourcePlanStatusOperation<T extends DDLDesc> extends DDLOperation<T> { + + public AbstractAlterResourcePlanStatusOperation(DDLOperationContext context, T desc) { super(context, desc); } - @Override - public int execute() throws HiveException, IOException { - if (desc.shouldValidate()) { - WMValidateResourcePlanResponse result = context.getDb().validateResourcePlan(desc.getPlanName()); - try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { - context.getFormatter().showErrors(out, result); - } catch (IOException e) { - throw new HiveException(e); - } - return 0; - } - - WMNullableResourcePlan resourcePlan = desc.getResourcePlan(); - WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getPlanName(), resourcePlan, - desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace()); - - boolean isActivate = resourcePlan.getStatus() != null && resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE; - boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate(); - if (!mustHaveAppliedChange && !desc.isReplace()) { - return 0; // The modification cannot affect an active plan. + protected void handleWMServiceChangeIfNeeded(WMFullResourcePlan appliedResourcePlan, boolean isActivate, + boolean isForceDeactivate, boolean replace) throws HiveException { + boolean mustHaveAppliedChange = isActivate || isForceDeactivate; + if (!mustHaveAppliedChange && !replace) { + return; // The modification cannot affect an active plan. } if (appliedResourcePlan == null && !mustHaveAppliedChange) { - return 0; // Replacing an inactive plan. + return; // Replacing an inactive plan. } WorkloadManager wm = WorkloadManager.getInstance(); boolean isInTest = HiveConf.getBoolVar(context.getConf(), ConfVars.HIVE_IN_TEST); if (wm == null && isInTest) { - return 0; // Skip for tests if WM is not present. + return; // Skip for tests if WM is not present. } - if ((appliedResourcePlan == null) != desc.isForceDeactivate()) { + if ((appliedResourcePlan == null) != isForceDeactivate) { throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable"); // TODO: shut down HS2? } assert appliedResourcePlan == null || appliedResourcePlan.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE; - handleWorkloadManagementServiceChange(wm, isActivate, appliedResourcePlan); - - return 0; + handleWMServiceChange(wm, isActivate, appliedResourcePlan); } - private int handleWorkloadManagementServiceChange(WorkloadManager wm, boolean isActivate, + private int handleWMServiceChange(WorkloadManager wm, boolean isActivate, WMFullResourcePlan appliedResourcePlan) throws HiveException { String name = null; if (isActivate) { diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableAnalyzer.java new file mode 100644 index 0000000000..b355eee038 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableAnalyzer.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
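The gating in handleWMServiceChangeIfNeeded above can be restated compactly. This is a simplified, self-contained sketch (an assumption-level restatement that drops the WM service calls, the test-mode short circuit, and the error paths) of when an ALTER must actually be pushed to the WorkloadManager:

```java
public class WMChangeGatingSketch {
  static boolean needsServiceChange(boolean hasAppliedPlan, boolean isActivate,
      boolean isForceDeactivate, boolean replace) {
    boolean mustHaveAppliedChange = isActivate || isForceDeactivate;
    if (!mustHaveAppliedChange && !replace) {
      return false;                   // cannot affect an active plan
    }
    if (!hasAppliedPlan && !mustHaveAppliedChange) {
      return false;                   // replacing an inactive plan
    }
    return true;                      // activate, force deactivate, or live replace
  }

  public static void main(String[] args) {
    System.out.println(needsServiceChange(true, true, false, false));  // activate -> true
    System.out.println(needsServiceChange(false, false, false, true)); // replace inactive -> false
  }
}
```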
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.disable; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for disable resource plan commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_RP_DISABLE) +public class AlterResourcePlanDisableAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanDisableAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() > 1) { + throw new SemanticException("Incorrect syntax"); + } + + String resourcePlanName = root.getChildCount() == 0 ? null : unescapeIdentifier(root.getChild(0).getText()); + + AlterResourcePlanDisableDesc desc = new AlterResourcePlanDisableDesc(resourcePlanName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableDesc.java similarity index 56% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableDesc.java index e43beeedd2..e00237a88c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableDesc.java @@ -16,30 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.disable; import java.io.Serializable; -import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * DDL task description for CREATE POOL commands. + * DDL task description for ALTER RESOURCE PLAN ... DISABLE or DISABLE RESOURCE PLAN commands. 
*/ -@Explain(displayName = "Create Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateWMPoolDesc implements DDLDesc, Serializable { - private static final long serialVersionUID = 4872940135771213510L; +@Explain(displayName = "Disable Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanDisableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = -3514685833183437279L; - private final WMPool pool; + private final String resourcePlanName; - public CreateWMPoolDesc(WMPool pool) { - this.pool = pool; + public AlterResourcePlanDisableDesc(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; } - @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMPool getPool() { - return pool; + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableOperation.java new file mode 100644 index 0000000000..0af89837c7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/AlterResourcePlanDisableOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.disable; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.AbstractAlterResourcePlanStatusOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of disabling a resource plan. + */ +public class AlterResourcePlanDisableOperation + extends AbstractAlterResourcePlanStatusOperation<AlterResourcePlanDisableDesc> { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway.
+ public AlterResourcePlanDisableOperation(DDLOperationContext context, AlterResourcePlanDisableDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + boolean forceDeactivate = desc.getResourcePlanName() == null; + + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + resourcePlan.setStatus(forceDeactivate ? WMResourcePlanStatus.ENABLED : WMResourcePlanStatus.DISABLED); + + WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, + false, forceDeactivate, false); + + handleWMServiceChangeIfNeeded(appliedResourcePlan, false, forceDeactivate, false); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java new file mode 100644 index 0000000000..4397c985a7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/disable/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Disable Resource Plan DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.disable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableAnalyzer.java new file mode 100644 index 0000000000..29a46cb58f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableAnalyzer.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
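The disable operation above encodes two distinct statements in one status field. A hedged sketch of that branch (the flag stands in for desc.getResourcePlanName() == null; class name hypothetical):

```java
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;

public class DisableStatusSketch {
  public static void main(String[] args) {
    // DISABLE WORKLOAD MANAGEMENT (no plan name) force-deactivates: the active
    // plan only falls back to ENABLED. ALTER RESOURCE PLAN rp1 DISABLE, by
    // contrast, marks the named plan DISABLED.
    boolean forceDeactivate = true; // stands in for desc.getResourcePlanName() == null
    WMNullableResourcePlan rp = new WMNullableResourcePlan();
    rp.setStatus(forceDeactivate ? WMResourcePlanStatus.ENABLED : WMResourcePlanStatus.DISABLED);
    System.out.println(rp.getStatus());
  }
}
```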
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for enable resource plan commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_RP_ENABLE) +public class AlterResourcePlanEnableAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanEnableAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() == 0) { + console.printError("Activate a resource plan to enable workload management!"); + return; + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + boolean enable = false; + boolean activate = false; + boolean replace = false; + + for (int i = 1; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_ACTIVATE: + activate = true; + if (child.getChildCount() > 1) { + throw new SemanticException("Expected 0 or 1 arguments " + root.toStringTree()); + } else if (child.getChildCount() == 1) { + if (child.getChild(0).getType() != HiveParser.TOK_REPLACE) { + throw new SemanticException("Incorrect syntax " + root.toStringTree()); + } + replace = true; + } + break; + case HiveParser.TOK_ENABLE: + enable = true; + break; + case HiveParser.TOK_REPLACE: + replace = true; + break; + default: + throw new SemanticException("Unexpected token in alter resource plan statement: " + child.getType()); + } + } + + AlterResourcePlanEnableDesc desc = new AlterResourcePlanEnableDesc(resourcePlanName, enable, activate, replace); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableDesc.java new file mode 100644 index 0000000000..22c8b866fb --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableDesc.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER RESOURCE PLAN ... ENABLE or ENABLE WORKLOAD MANAGEMENT commands. + */ +@Explain(displayName = "Enable Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanEnableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final boolean enable; + private final boolean activate; + private final boolean replace; + + public AlterResourcePlanEnableDesc(String resourcePlanName, boolean enable, boolean activate, boolean replace) { + this.resourcePlanName = resourcePlanName; + this.enable = enable; + this.activate = activate; + this.replace = replace; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Enable", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue=true) + public boolean isEnable() { + return enable; + } + + @Explain(displayName="Activate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue=true) + public boolean isActivate() { + return activate; + } + + @Explain(displayName="Replace", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue=true) + public boolean isReplace() { + return replace; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableOperation.java new file mode 100644 index 0000000000..5f8ab95a52 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/AlterResourcePlanEnableOperation.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
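The enable analyzer's token walk collapses several statement shapes into three booleans on the desc. A hypothetical illustration of the mapping (statements in comments are examples, not an exhaustive grammar):

```java
import org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable.AlterResourcePlanEnableDesc;

public class EnableFlagCombinations {
  public static void main(String[] args) {
    // Flag order: enable, activate, replace.
    new AlterResourcePlanEnableDesc("rp1", true,  false, false); // ALTER RESOURCE PLAN rp1 ENABLE
    new AlterResourcePlanEnableDesc("rp1", false, true,  false); // ALTER RESOURCE PLAN rp1 ACTIVATE
    new AlterResourcePlanEnableDesc("rp1", true,  true,  false); // ALTER RESOURCE PLAN rp1 ENABLE ACTIVATE
    new AlterResourcePlanEnableDesc("rp1", false, true,  true);  // ALTER RESOURCE PLAN rp1 ACTIVATE WITH REPLACE
  }
}
```

Note that in the operation, enable + activate without replace is the one combination that may activate a currently disabled plan (canActivateDisabled).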
*/ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.AbstractAlterResourcePlanStatusOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of enabling a resource plan. + */ +public class AlterResourcePlanEnableOperation + extends AbstractAlterResourcePlanStatusOperation<AlterResourcePlanEnableDesc> { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway. + public AlterResourcePlanEnableOperation(DDLOperationContext context, AlterResourcePlanEnableDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + resourcePlan.setStatus(desc.isActivate() ? WMResourcePlanStatus.ACTIVE : WMResourcePlanStatus.ENABLED); + + boolean canActivateDisabled = desc.isEnable() && desc.isActivate() && !desc.isReplace(); + WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, + canActivateDisabled, false, desc.isReplace()); + + handleWMServiceChangeIfNeeded(appliedResourcePlan, desc.isActivate(), false, desc.isReplace()); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java new file mode 100644 index 0000000000..6631ad7e1c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/enable/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Enable Resource Plan DDL operation.
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.enable; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java new file mode 100644 index 0000000000..5bb28485d6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Resource Plan DDL operations. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameAnalyzer.java new file mode 100644 index 0000000000..dac15e0630 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameAnalyzer.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.rename; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter resource plan rename commands. 
+ */ +@DDLType(type=HiveParser.TOK_ALTER_RP_RENAME) +public class AlterResourcePlanRenameAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanRenameAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 2) { + throw new SemanticException("Expected two arguments"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String newResourcePlanName = unescapeIdentifier(root.getChild(1).getText()); + + AlterResourcePlanRenameDesc desc = new AlterResourcePlanRenameDesc(resourcePlanName, newResourcePlanName); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameDesc.java new file mode 100644 index 0000000000..c2793d0d7e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameDesc.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.rename; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER RESOURCE PLAN ... RENAME ... commands. 
+ */ +@Explain(displayName = "Rename Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanRenameDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final String newResourcePlanName; + + public AlterResourcePlanRenameDesc(String resourcePlanName, String newResourcePlanName) { + this.resourcePlanName = resourcePlanName; + this.newResourcePlanName = newResourcePlanName; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="New resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getNewResourcePlanName() { + return newResourcePlanName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameOperation.java new file mode 100644 index 0000000000..2a47aadaac --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/AlterResourcePlanRenameOperation.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.rename; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of renaming a resource plan. + */ +public class AlterResourcePlanRenameOperation extends DDLOperation<AlterResourcePlanRenameDesc> { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway.
+ public AlterResourcePlanRenameOperation(DDLOperationContext context, AlterResourcePlanRenameDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + resourcePlan.setName(desc.getNewResourcePlanName()); + + context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, false, false, false); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java new file mode 100644 index 0000000000..2aeb183dab --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/rename/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Resource Plan Rename DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.rename; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceAnalyzer.java new file mode 100644 index 0000000000..b39c68807b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceAnalyzer.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
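Rename is the one alter operation that needs no status handling: it only ships the new name. A hypothetical sketch of the metastore request it builds:

```java
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;

public class RenameSketch {
  public static void main(String[] args) {
    // ALTER RESOURCE PLAN rp1 RENAME TO rp2 (hypothetical names): only the name
    // changes, no status transition, so the WM service is never consulted.
    WMNullableResourcePlan rp = new WMNullableResourcePlan();
    rp.setName("rp2");
    System.out.println(rp.getName());
  }
}
```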
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.replace; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for replace resource plan commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_RP_REPLACE) +public class AlterResourcePlanReplaceAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanReplaceAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() < 1 || root.getChildCount() > 2) { + throw new SemanticException("Incorrect syntax"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String destinationResourcePlan = root.getChildCount() == 2 ? unescapeIdentifier(root.getChild(1).getText()) : null; + + AlterResourcePlanReplaceDesc desc = new AlterResourcePlanReplaceDesc(resourcePlanName, destinationResourcePlan); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceDesc.java new file mode 100644 index 0000000000..5b8db93e9a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceDesc.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.replace; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for REPLACE [ACTIVE] RESOURCE PLAN ... commands. 
+ */ +@Explain(displayName = "Replace Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanReplaceDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = -3514685833183437279L; + + private final String resourcePlanName; + private final String destinationResourcePlanName; + + public AlterResourcePlanReplaceDesc(String resourcePlanName, String destinationResourcePlanName) { + this.resourcePlanName = resourcePlanName; + this.destinationResourcePlanName = destinationResourcePlanName; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Destination Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDestinationResourcePlanName() { + return destinationResourcePlanName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceOperation.java new file mode 100644 index 0000000000..8f82b551c4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/AlterResourcePlanReplaceOperation.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.replace; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.AbstractAlterResourcePlanStatusOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of replacing a resource plan. + */ +public class AlterResourcePlanReplaceOperation + extends AbstractAlterResourcePlanStatusOperation<AlterResourcePlanReplaceDesc> { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway.
+ public AlterResourcePlanReplaceOperation(DDLOperationContext context, AlterResourcePlanReplaceDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + if (desc.getDestinationResourcePlanName() == null) { + resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE); + } else { + resourcePlan.setName(desc.getDestinationResourcePlanName()); + } + + WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, + false, false, true); + + handleWMServiceChangeIfNeeded(appliedResourcePlan, false, false, true); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java new file mode 100644 index 0000000000..40607f89fc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/replace/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Replace Resource Plan DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.replace; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetAnalyzer.java new file mode 100644 index 0000000000..a9025e0745 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetAnalyzer.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.set; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter resource plan set commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_RP_SET) +public class AlterResourcePlanSetAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanSetAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + Integer queryParallelism = null; + String defaultPool = null; + for (int i = 1; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_QUERY_PARALLELISM: + if (child.getChildCount() != 1) { + throw new SemanticException("Expected one argument"); + } + + queryParallelism = Integer.parseInt(child.getChild(0).getText()); + break; + case HiveParser.TOK_DEFAULT_POOL: + if (child.getChildCount() != 1) { + throw new SemanticException("Expected one argument"); + } + + defaultPool = WMUtils.poolPath(child.getChild(0)); + break; + default: + throw new SemanticException("Unexpected token in alter resource plan statement: " + child.getType()); + } + } + + AlterResourcePlanSetDesc desc = new AlterResourcePlanSetDesc(resourcePlanName, queryParallelism, defaultPool); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetDesc.java new file mode 100644 index 0000000000..7aa716b989 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetDesc.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
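
For illustration, the analyzer above recognizes statements of the following shape, assuming the documented workload management grammar (plan and pool names are made up):

    ALTER RESOURCE PLAN plan_a SET QUERY_PARALLELISM = 4;
    ALTER RESOURCE PLAN plan_a SET DEFAULT POOL = pool1;
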
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.set; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER RESOURCE PLAN ... SET ... commands. + */ +@Explain(displayName = "Alter Resource plan Set", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanSetDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = -3514685833183437279L; + + private final String resourcePlanName; + private final Integer queryParallelism; + private final String defaultPool; + + public AlterResourcePlanSetDesc(String resourcePlanName, Integer queryParallelism, String defaultPool) { + this.resourcePlanName = resourcePlanName; + this.queryParallelism = queryParallelism; + this.defaultPool = defaultPool; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Query parallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getQueryParallelism() { + return queryParallelism; + } + + @Explain(displayName="Default pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDefaultPool() { + return defaultPool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetOperation.java new file mode 100644 index 0000000000..58af58e40d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/AlterResourcePlanSetOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.set; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of setting properties of a resource plan. + */ +public class AlterResourcePlanSetOperation extends DDLOperation { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. 
We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway. + public AlterResourcePlanSetOperation(DDLOperationContext context, AlterResourcePlanSetDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + + if (desc.getQueryParallelism() != null) { + resourcePlan.setIsSetQueryParallelism(true); + resourcePlan.setQueryParallelism(desc.getQueryParallelism()); + } + + if (desc.getDefaultPool() != null) { + resourcePlan.setIsSetDefaultPoolPath(true); + resourcePlan.setDefaultPoolPath(desc.getDefaultPool()); + } + + context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, false, false, false); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java new file mode 100644 index 0000000000..f1fdebceef --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/set/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Resource Plan Set DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.set; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetAnalyzer.java new file mode 100644 index 0000000000..8bb6039e91 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetAnalyzer.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.unset; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter resource plan unset commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_RP_UNSET) +public class AlterResourcePlanUnsetAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanUnsetAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + boolean unsetQueryParallelism = false; + boolean unsetDefaultPool = false; + for (int i = 1; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_QUERY_PARALLELISM: + if (child.getChildCount() != 0) { + throw new SemanticException("Expected zero arguments"); + } + + unsetQueryParallelism = true; + break; + case HiveParser.TOK_DEFAULT_POOL: + if (child.getChildCount() != 0) { + throw new SemanticException("Expected zero arguments"); + } + + unsetDefaultPool = true; + break; + default: + throw new SemanticException( + "Unexpected token in alter resource plan statement: " + child.getType()); + } + } + + AlterResourcePlanUnsetDesc desc = new AlterResourcePlanUnsetDesc(resourcePlanName, unsetQueryParallelism, + unsetDefaultPool); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetDesc.java new file mode 100644 index 0000000000..83a9e70a0f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetDesc.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.unset; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER RESOURCE PLAN ... UNSET ... commands. + */ +@Explain(displayName = "Alter Resource plan Unset", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanUnsetDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = -3514685833183437279L; + + private final String resourcePlanName; + private final boolean unsetQueryParallelism; + private final boolean unsetDefaultPool; + + public AlterResourcePlanUnsetDesc(String resourcePlanName, boolean unsetQueryParallelism, boolean unsetDefaultPool) { + this.resourcePlanName = resourcePlanName; + this.unsetQueryParallelism = unsetQueryParallelism; + this.unsetDefaultPool = unsetDefaultPool; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Unset Query parallelism", displayOnlyOnTrue=true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isUnsetQueryParallelism() { + return unsetQueryParallelism; + } + + @Explain(displayName="Unset Default Pool", displayOnlyOnTrue=true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isUnsetDefaultPool() { + return unsetDefaultPool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetOperation.java new file mode 100644 index 0000000000..04a63d3ea3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/AlterResourcePlanUnsetOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.unset; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of unsetting properties of a resource plan. + */ +public class AlterResourcePlanUnsetOperation extends DDLOperation { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway. + public AlterResourcePlanUnsetOperation(DDLOperationContext context, AlterResourcePlanUnsetDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); + + if (desc.isUnsetQueryParallelism()) { + resourcePlan.setIsSetQueryParallelism(true); + resourcePlan.unsetQueryParallelism(); + } + + if (desc.isUnsetDefaultPool()) { + resourcePlan.setIsSetDefaultPoolPath(true); + resourcePlan.unsetDefaultPoolPath(); + } + + context.getDb().alterResourcePlan(desc.getResourcePlanName(), resourcePlan, false, false, false); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java new file mode 100644 index 0000000000..754f46a265 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/unset/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Resource Plan Unset DDL operation. 
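+ *
+ * <p>Illustrative statements, assuming the grammar mirrors the SET variant (plan name is made up):
+ *   ALTER RESOURCE PLAN plan_a UNSET QUERY_PARALLELISM;
+ *   ALTER RESOURCE PLAN plan_a UNSET DEFAULT POOL;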
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.unset; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateAnalyzer.java new file mode 100644 index 0000000000..fcabf08ad1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateAnalyzer.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.validate; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter resource plan validate commands. 
+ */ +@DDLType(type=HiveParser.TOK_ALTER_RP_VALIDATE) +public class AlterResourcePlanValidateAnalyzer extends BaseSemanticAnalyzer { + public AlterResourcePlanValidateAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 1) { + throw new SemanticException("Incorrect syntax"); + } + + ctx.setResFile(ctx.getLocalTmpPath()); + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + AlterResourcePlanValidateDesc desc = new AlterResourcePlanValidateDesc(resourcePlanName, ctx.getResFile()); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + task.setFetchSource(true); + setFetchTask(createFetchTask(AlterResourcePlanValidateDesc.SCHEMA)); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateDesc.java new file mode 100644 index 0000000000..cbcf80ef6f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateDesc.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.validate; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER RESOURCE PLAN ... VALIDATE commands. 
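+ *
+ * <p>Illustrative statement (plan name is made up); validation errors are fetched back as a single string
+ * column, per the SCHEMA constant below:
+ *   ALTER RESOURCE PLAN plan_a VALIDATE;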
+ */ +@Explain(displayName = "Validate Resource Plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanValidateDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + public static final String SCHEMA = "error#string"; + + private final String resourcePlanName; + private final Path resFile; + + public AlterResourcePlanValidateDesc(String resourcePlanName, Path resFile) { + this.resourcePlanName = resourcePlanName; + this.resFile = resFile; + } + + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName = "Result file", explainLevels = { Level.EXTENDED }) + public Path getResFile() { + return resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateOperation.java new file mode 100644 index 0000000000..bda4fe9137 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/AlterResourcePlanValidateOperation.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.validate; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of validating a resource plan. + */ +public class AlterResourcePlanValidateOperation extends DDLOperation { + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway. 
+ public AlterResourcePlanValidateOperation(DDLOperationContext context, AlterResourcePlanValidateDesc desc) { + super(context, desc); + } + + @Override + public int execute() throws HiveException, IOException { + WMValidateResourcePlanResponse result = context.getDb().validateResourcePlan(desc.getResourcePlanName()); + try (DataOutputStream out = DDLUtils.getOutputStream(desc.getResFile(), context)) { + context.getFormatter().showErrors(out, result); + } catch (IOException e) { + throw new HiveException(e); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java new file mode 100644 index 0000000000..f998608f2e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/alter/validate/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Resource Plan Validate DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.alter.validate; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanAnalyzer.java new file mode 100644 index 0000000000..a275a19741 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanAnalyzer.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.create; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for create resource plan commands. + */ +@DDLType(type=HiveParser.TOK_CREATE_RP) +public class CreateResourcePlanAnalyzer extends BaseSemanticAnalyzer { + public CreateResourcePlanAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() == 0) { + throw new SemanticException("Expected name in CREATE RESOURCE PLAN statement"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + Integer queryParallelism = null; + String likeName = null; + boolean ifNotExists = false; + + for (int i = 1; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_QUERY_PARALLELISM: + // Note: later we may be able to set multiple things together (except LIKE). + if (queryParallelism == null && likeName == null) { + queryParallelism = Integer.parseInt(child.getChild(0).getText()); + } else { + throw new SemanticException("Conflicting create arguments " + root.toStringTree()); + } + break; + case HiveParser.TOK_LIKERP: + if (queryParallelism == null && likeName == null) { + likeName = unescapeIdentifier(child.getChild(0).getText()); + } else { + throw new SemanticException("Conflicting create arguments " + root.toStringTree()); + } + break; + case HiveParser.TOK_IFNOTEXISTS: + ifNotExists = true; + break; + default: + throw new SemanticException("Invalid create arguments " + root.toStringTree()); + } + } + + CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism, likeName, ifNotExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanDesc.java similarity index 73% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanDesc.java index 17d5083f2a..181b1a8fd7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.create; import java.io.Serializable; @@ -27,28 +27,29 @@ /** * DDL task description for CREATE RESOURCE PLAN commands. 
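+ * <p>Illustrative statements, assuming the documented grammar (names are made up); QUERY_PARALLELISM and
+ * LIKE are mutually exclusive, per the "Conflicting create arguments" check in the analyzer:
+ *   CREATE RESOURCE PLAN plan_a;
+ *   CREATE RESOURCE PLAN IF NOT EXISTS plan_a WITH QUERY_PARALLELISM=4;
+ *   CREATE RESOURCE PLAN plan_b LIKE plan_a;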
*/ -@Explain(displayName = "Create ResourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +@Explain(displayName = "Create Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class CreateResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -3492803425541479414L; - private final String planName; + private final String resourcePlanName; private final Integer queryParallelism; private final String copyFromName; private final boolean ifNotExists; - public CreateResourcePlanDesc(String planName, Integer queryParallelism, String copyFromName, boolean ifNotExists) { - this.planName = planName; + public CreateResourcePlanDesc(String resourcePlanName, Integer queryParallelism, String copyFromName, + boolean ifNotExists) { + this.resourcePlanName = resourcePlanName; this.queryParallelism = queryParallelism; this.copyFromName = copyFromName; this.ifNotExists = ifNotExists; } - @Explain(displayName="planName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } - @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="Query parallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public Integer getQueryParallelism() { return queryParallelism; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanOperation.java similarity index 91% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanOperation.java index 95ee6dab2d..f75935ba94 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/CreateResourcePlanOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.create; import java.io.IOException; @@ -35,7 +35,7 @@ public CreateResourcePlanOperation(DDLOperationContext context, CreateResourcePl @Override public int execute() throws HiveException, IOException { - WMResourcePlan plan = new WMResourcePlan(desc.getPlanName()); + WMResourcePlan plan = new WMResourcePlan(desc.getResourcePlanName()); if (desc.getQueryParallelism() != null) { plan.setQueryParallelism(desc.getQueryParallelism()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java new file mode 100644 index 0000000000..3b4ab0e973 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/create/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Create Resource Plan DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.create; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanAnalyzer.java new file mode 100644 index 0000000000..8af7930859 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanAnalyzer.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.drop; + +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for drop resource plan commands. 
+ */ +@DDLType(type=HiveParser.TOK_DROP_RP) +public class DropResourcePlanAnalyzer extends BaseSemanticAnalyzer { + public DropResourcePlanAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() == 0) { + throw new SemanticException("Expected name in DROP RESOURCE PLAN statement"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + boolean ifExists = false; + + for (int i = 1; i < root.getChildCount(); ++i) { + Tree child = root.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_IFEXISTS: + ifExists = true; + break; + default: + throw new SemanticException("Invalid drop arguments " + root.toStringTree()); + } + } + + DropResourcePlanDesc desc = new DropResourcePlanDesc(resourcePlanName, ifExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanDesc.java similarity index 68% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanDesc.java index ef7c723524..af23166260 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.drop; import java.io.Serializable; @@ -27,24 +27,24 @@ /** * DDL task description for DROP RESOURCE PLAN commands.
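+ * <p>Illustrative statements, assuming the documented grammar (plan name is made up):
+ *   DROP RESOURCE PLAN plan_a;
+ *   DROP RESOURCE PLAN IF EXISTS plan_a;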
*/ -@Explain(displayName = "Drop Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +@Explain(displayName = "Drop Resource plan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class DropResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1258596919510047766L; - private final String planName; + private final String resourcePlanName; private final boolean ifExists; - public DropResourcePlanDesc(String planName, boolean ifExists) { - this.planName = planName; + public DropResourcePlanDesc(String resourcePlanName, boolean ifExists) { + this.resourcePlanName = resourcePlanName; this.ifExists = ifExists; } - @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } - @Explain(displayName="ifExists", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + @Explain(displayName="If exists", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true) public boolean getIfExists() { return ifExists; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanOperation.java similarity index 89% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanOperation.java index 964e989eb8..b29d495451 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/DropResourcePlanOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.drop; import java.io.IOException; @@ -34,7 +34,7 @@ public DropResourcePlanOperation(DDLOperationContext context, DropResourcePlanDe @Override public int execute() throws HiveException, IOException { - context.getDb().dropResourcePlan(desc.getPlanName(), desc.getIfExists()); + context.getDb().dropResourcePlan(desc.getResourcePlanName(), desc.getIfExists()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java new file mode 100644 index 0000000000..48905485fd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/drop/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Drop Resource Plan DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.drop; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanAnalyzer.java new file mode 100644 index 0000000000..440e5759a7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanAnalyzer.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.show; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for show resource plan commands. + */ +@DDLType(type=HiveParser.TOK_SHOW_RP) +public class ShowResourcePlanAnalyzer extends BaseSemanticAnalyzer { + public ShowResourcePlanAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() > 1) { + throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement"); + } + + ctx.setResFile(ctx.getLocalTmpPath()); + + String resourcePlanName = (root.getChildCount() == 0) ? 
null : unescapeIdentifier(root.getChild(0).getText()); + + ShowResourcePlanDesc desc = new ShowResourcePlanDesc(resourcePlanName, ctx.getResFile().toString()); + Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + task.setFetchSource(true); + setFetchTask(createFetchTask(desc.getSchema())); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanDesc.java similarity index 76% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanDesc.java index e2cdcb531d..187be6c3b2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.show; import java.io.Serializable; @@ -34,25 +34,25 @@ private static final String ALL_SCHEMA = "rp_name,status,query_parallelism#string,string,int"; private static final String SINGLE_SCHEMA = "line#string"; - private final String planName; + private final String resourcePlanName; private final String resFile; - public ShowResourcePlanDesc(String planName, String resFile) { - this.planName = planName; + public ShowResourcePlanDesc(String resourcePlanName, String resFile) { + this.resourcePlanName = resourcePlanName; this.resFile = resFile; } - @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getResourcePlanName() { - return planName; + return resourcePlanName; } - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + @Explain(displayName = "Result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } public String getSchema() { - return (planName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; + return (resourcePlanName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanOperation.java similarity index 89% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanOperation.java index aa586fcb9e..8a5739b68c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/ShowResourcePlanOperation.java @@ -16,7 +16,7 @@ * limitations under the License. 
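
For illustration, the analyzer and desc above cover both forms of the command, assuming the documented grammar (plan name is made up). The list form returns one row per plan with the rp_name, status and query_parallelism columns; the single-plan form dumps the plan as lines of text:

    SHOW RESOURCE PLANS;
    SHOW RESOURCE PLAN plan_a;
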
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.show; import java.io.DataOutputStream; import java.io.IOException; @@ -39,9 +39,9 @@ public ShowResourcePlanOperation(DDLOperationContext context, ShowResourcePlanDe public int execute() throws HiveException, IOException { // TODO: Enhance showResourcePlan to display all the pools, triggers and mappings. try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { - String planName = desc.getResourcePlanName(); - if (planName != null) { - context.getFormatter().showFullResourcePlan(out, context.getDb().getResourcePlan(planName)); + String resourcePlanName = desc.getResourcePlanName(); + if (resourcePlanName != null) { + context.getFormatter().showFullResourcePlan(out, context.getDb().getResourcePlan(resourcePlanName)); } else { context.getFormatter().showResourcePlans(out, context.getDb().getAllResourcePlans()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java new file mode 100644 index 0000000000..80918f1326 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/resourceplan/show/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show Resource Plan DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.resourceplan.show; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/TriggerUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/TriggerUtils.java new file mode 100644 index 0000000000..27848b954c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/TriggerUtils.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger; + +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; + +/** + * Common utilities for Trigger related ddl operations. + */ +public final class TriggerUtils { + private TriggerUtils() { + throw new UnsupportedOperationException("TriggerUtils should not be instantiated"); + } + + public static String buildTriggerExpression(ASTNode node) throws SemanticException { + if (node.getType() != HiveParser.TOK_TRIGGER_EXPRESSION || node.getChildCount() == 0) { + throw new SemanticException("Invalid trigger expression."); + } + + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < node.getChildCount(); ++i) { + builder.append(node.getChild(i).getText()); // Don't strip quotes. + builder.append(' '); + } + builder.deleteCharAt(builder.length() - 1); + return builder.toString(); + } + + public static String buildTriggerActionExpression(ASTNode node) throws SemanticException { + switch (node.getType()) { + case HiveParser.KW_KILL: + return "KILL"; + case HiveParser.KW_MOVE: + if (node.getChildCount() != 1) { + throw new SemanticException("Invalid move to clause in trigger action."); + } + String poolPath = WMUtils.poolPath(node.getChild(0)); + return "MOVE TO " + poolPath; + default: + throw new SemanticException("Unknown token in action clause: " + node.getType()); + } + } + + public static void validateTrigger(WMTrigger trigger) throws HiveException { + try { + ExecutionTrigger.fromWMTrigger(trigger); + } catch (IllegalArgumentException e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerAnalyzer.java new file mode 100644 index 0000000000..80f8db4474 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerAnalyzer.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
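TriggerUtils.buildTriggerExpression simply joins the TOK_TRIGGER_EXPRESSION child tokens with single spaces, deliberately leaving quotes intact, and buildTriggerActionExpression reduces the action subtree to either "KILL" or "MOVE TO <pool>". A hedged sketch of the join, using a plain token list as a stand-in for the real ASTNode children (the token values are illustrative):

import java.util.Arrays;
import java.util.List;

public class TriggerExpressionSketch {
  public static void main(String[] args) {
    // Stand-in for the TOK_TRIGGER_EXPRESSION children; the real method walks an ASTNode.
    List<String> childTokens = Arrays.asList("ELAPSED_TIME", ">", "100000");
    StringBuilder builder = new StringBuilder();
    for (String token : childTokens) {
      builder.append(token).append(' '); // Quotes are not stripped.
    }
    builder.deleteCharAt(builder.length() - 1); // Drop the trailing space.
    System.out.println(builder); // ELAPSED_TIME > 100000
  }
}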
+ */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.TriggerUtils; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter trigger commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_TRIGGER) +public class AlterWMTriggerAnalyzer extends BaseSemanticAnalyzer { + public AlterWMTriggerAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 4) { + throw new SemanticException("Invalid syntax for alter trigger statement"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String triggerName = unescapeIdentifier(root.getChild(1).getText()); + String triggerExpression = TriggerUtils.buildTriggerExpression((ASTNode)root.getChild(2)); + String actionExpression = TriggerUtils.buildTriggerActionExpression((ASTNode)root.getChild(3)); + + AlterWMTriggerDesc desc = new AlterWMTriggerDesc(resourcePlanName, triggerName, triggerExpression, + actionExpression); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerDesc.java new file mode 100644 index 0000000000..a033a921f3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerDesc.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TRIGGER commands. 
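The new analyzer requires exactly four children under TOK_ALTER_TRIGGER — plan name, trigger name, WHEN expression, action — corresponding to a statement roughly of the form ALTER TRIGGER plan1.trig1 WHEN ELAPSED_TIME > 100000 DO KILL (names illustrative, not from this patch). A minimal sketch of the desc it produces from those children:

import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter.AlterWMTriggerDesc;

public class AlterTriggerDescSketch {
  public static void main(String[] args) {
    // Child 0: plan name, child 1: trigger name, child 2: WHEN expression, child 3: action.
    AlterWMTriggerDesc desc = new AlterWMTriggerDesc("plan1", "trig1", "ELAPSED_TIME > 100000", "KILL");
    System.out.println(desc.getTriggerExpression() + " -> " + desc.getActionExpression());
  }
}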
+ */ +@Explain(displayName="Alter WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMTriggerDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final String triggerName; + private final String triggerExpression; + private final String actionExpression; + + public AlterWMTriggerDesc(String resourcePlanName, String triggerName, String triggerExpression, + String actionExpression) { + this.resourcePlanName = resourcePlanName; + this.triggerName = triggerName; + this.triggerExpression = triggerExpression; + this.actionExpression = actionExpression; + } + + @Explain(displayName="Resource plan name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Trigger name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerName() { + return triggerName; + } + + @Explain(displayName="Trigger expression", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerExpression() { + return triggerExpression; + } + + @Explain(displayName="Action expression", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getActionExpression() { + return actionExpression; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerOperation.java similarity index 72% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerOperation.java index 34b2e33694..44d27334f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/AlterWMTriggerOperation.java @@ -16,12 +16,14 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.TriggerUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; /** @@ -34,8 +36,12 @@ public AlterWMTriggerOperation(DDLOperationContext context, AlterWMTriggerDesc d @Override public int execute() throws HiveException, IOException { - WMUtils.validateTrigger(desc.getTrigger()); - context.getDb().alterWMTrigger(desc.getTrigger()); + WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); + trigger.setTriggerExpression(desc.getTriggerExpression()); + trigger.setActionExpression(desc.getActionExpression()); + + TriggerUtils.validateTrigger(trigger); + context.getDb().alterWMTrigger(trigger); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java new file mode 100644 index 0000000000..1f39b17e8f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/alter/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter Trigger DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.alter; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerAnalyzer.java new file mode 100644 index 0000000000..4eb0d143e6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerAnalyzer.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
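Note the shift in responsibility here: the desc now carries four plain strings, and the operation assembles the Thrift WMTrigger at execute time, where the old code shipped a prebuilt WMTrigger inside the desc. A hedged sketch of that assembly, mirroring the execute() body in the hunk above (the names are illustrative):

import org.apache.hadoop.hive.metastore.api.WMTrigger;

public class RebuildTriggerSketch {
  public static void main(String[] args) {
    // Assemble the Thrift object from the desc's plain strings, as execute() now does.
    WMTrigger trigger = new WMTrigger("plan1", "trig1");
    trigger.setTriggerExpression("ELAPSED_TIME > 100000");
    trigger.setActionExpression("KILL");
    // After this, execute() runs TriggerUtils.validateTrigger(trigger)
    // and hands the trigger to the metastore via alterWMTrigger.
    System.out.println(trigger);
  }
}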
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.TriggerUtils; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for create trigger commands. + */ +@DDLType(type=HiveParser.TOK_CREATE_TRIGGER) +public class CreateWMTriggerAnalyzer extends BaseSemanticAnalyzer { + public CreateWMTriggerAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 4) { + throw new SemanticException("Invalid syntax for create trigger statement"); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String triggerName = unescapeIdentifier(root.getChild(1).getText()); + String triggerExpression = TriggerUtils.buildTriggerExpression((ASTNode)root.getChild(2)); + String actionExpression = TriggerUtils.buildTriggerActionExpression((ASTNode)root.getChild(3)); + + CreateWMTriggerDesc desc = new CreateWMTriggerDesc(resourcePlanName, triggerName, triggerExpression, + actionExpression); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerDesc.java new file mode 100644 index 0000000000..6d218be7f7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerDesc.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for CREATE TRIGGER commands. 
+ */ +@Explain(displayName="Create WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMTriggerDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + private final String resourcePlanName; + private final String triggerName; + private final String triggerExpression; + private final String actionExpression; + + public CreateWMTriggerDesc(String resourcePlanName, String triggerName, String triggerExpression, + String actionExpression) { + this.resourcePlanName = resourcePlanName; + this.triggerName = triggerName; + this.triggerExpression = triggerExpression; + this.actionExpression = actionExpression; + } + + @Explain(displayName="Resource plan name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + @Explain(displayName="Trigger name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerName() { + return triggerName; + } + + @Explain(displayName="Trigger expression", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerExpression() { + return triggerExpression; + } + + @Explain(displayName="Action expression", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getActionExpression() { + return actionExpression; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerOperation.java similarity index 72% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerOperation.java index 5fc0e08fdc..b44988f0d9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/CreateWMTriggerOperation.java @@ -16,12 +16,14 @@ * limitations under the License. 
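CreateWMTriggerDesc is field-for-field identical to AlterWMTriggerDesc; only the eventual metastore call differs (createWMTrigger vs alterWMTrigger). A MOVE action arrives as a single string, e.g. for a statement along the lines of CREATE TRIGGER plan1.trig2 WHEN BYTES_READ > '10GB' DO MOVE TO pool1 (illustrative, not from this patch):

import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create.CreateWMTriggerDesc;

public class CreateTriggerDescSketch {
  public static void main(String[] args) {
    // The action subtree has already been flattened to "MOVE TO pool1" by TriggerUtils.
    CreateWMTriggerDesc desc = new CreateWMTriggerDesc("plan1", "trig2", "BYTES_READ > '10GB'", "MOVE TO pool1");
    System.out.println(desc.getActionExpression()); // MOVE TO pool1
  }
}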
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create; import java.io.IOException; +import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.TriggerUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; /** @@ -34,8 +36,12 @@ public CreateWMTriggerOperation(DDLOperationContext context, CreateWMTriggerDesc @Override public int execute() throws HiveException, IOException { - WMUtils.validateTrigger(desc.getTrigger()); - context.getDb().createWMTrigger(desc.getTrigger()); + WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); + trigger.setTriggerExpression(desc.getTriggerExpression()); + trigger.setActionExpression(desc.getActionExpression()); + + TriggerUtils.validateTrigger(trigger); + context.getDb().createWMTrigger(trigger); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java new file mode 100644 index 0000000000..3f72467d5e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/create/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Create Trigger DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.create; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerAnalyzer.java new file mode 100644 index 0000000000..7f7a08e267 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerAnalyzer.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for drop trigger commands. + */ +@DDLType(type=HiveParser.TOK_DROP_TRIGGER) +public class DropWMTriggerAnalyzer extends BaseSemanticAnalyzer { + public DropWMTriggerAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 2) { + throw new SemanticException("Invalid syntax for drop trigger."); + } + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + String triggerName = unescapeIdentifier(root.getChild(1).getText()); + + DropWMTriggerDesc desc = new DropWMTriggerDesc(resourcePlanName, triggerName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerDesc.java similarity index 73% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerDesc.java index 7096706914..7139f7845a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerDesc.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop; import java.io.Serializable; @@ -31,20 +31,20 @@ public class DropWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 963803766313787632L; - private final String planName; + private final String resourcePlanName; private final String triggerName; - public DropWMTriggerDesc(String planName, String triggerName) { - this.planName = planName; + public DropWMTriggerDesc(String resourcePlanName, String triggerName) { + this.resourcePlanName = resourcePlanName; this.triggerName = triggerName; } - @Explain(displayName="resourcePlanName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName="Resource plan name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } - @Explain(displayName="triggerName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="Trigger name", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTriggerName() { return triggerName; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerOperation.java similarity index 89% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerOperation.java index 7c94215925..5796be7231 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/DropWMTriggerOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop; import java.io.IOException; @@ -34,7 +34,7 @@ public DropWMTriggerOperation(DDLOperationContext context, DropWMTriggerDesc des @Override public int execute() throws HiveException, IOException { - context.getDb().dropWMTrigger(desc.getPlanName(), desc.getTriggerName()); + context.getDb().dropWMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java new file mode 100644 index 0000000000..a8fabfb992 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/drop/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
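Drop is the simplest of the trigger commands: two identifiers, no expressions, and the operation shown just above forwards both straight to dropWMTrigger. A minimal sketch with illustrative names:

import org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop.DropWMTriggerDesc;

public class DropTriggerDescSketch {
  public static void main(String[] args) {
    DropWMTriggerDesc desc = new DropWMTriggerDesc("plan1", "trig1");
    // execute() calls context.getDb().dropWMTrigger(resourcePlanName, triggerName).
    System.out.println(desc.getResourcePlanName() + "." + desc.getTriggerName());
  }
}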
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Drop Trigger DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.drop; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java new file mode 100644 index 0000000000..707cfca3da --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Trigger DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerAnalyzer.java new file mode 100644 index 0000000000..c3aa94c377 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerAnalyzer.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.add; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter pool add trigger commands.
+ */ +@DDLType(type=HiveParser.TOK_ALTER_POOL_ADD_TRIGGER) +public class AlterPoolAddTriggerAnalyzer extends BaseSemanticAnalyzer { + public AlterPoolAddTriggerAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 3) { + throw new SemanticException("Invalid syntax for alter pool add trigger: " + root.toStringTree()); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + String poolPath = root.getChild(1).getType() == HiveParser.TOK_UNMANAGED ? + null : WMUtils.poolPath(root.getChild(1)); + String triggerName = unescapeIdentifier(root.getChild(2).getText()); + + AlterPoolAddTriggerDesc desc = new AlterPoolAddTriggerDesc(resourcePlanName, triggerName, poolPath); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerDesc.java similarity index 74% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerDesc.java index 5aef9fa1f1..772ffa5406 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerDesc.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.add; import java.io.Serializable; @@ -30,21 +30,19 @@ public class AlterPoolAddTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 383046258694558029L; - private final String planName; + private final String resourcePlanName; private final String triggerName; private final String poolPath; - private final boolean isUnmanagedPool; - public AlterPoolAddTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) { - this.planName = planName; + public AlterPoolAddTriggerDesc(String resourcePlanName, String triggerName, String poolPath) { + this.resourcePlanName = resourcePlanName; this.triggerName = triggerName; this.poolPath = poolPath; - this.isUnmanagedPool = isUnmanagedPool; } - @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -52,16 +50,12 @@ public String getTriggerName() { return triggerName; } - @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPoolPathForExplain() { - return isUnmanagedPool ? 
"" : poolPath; - } - public String getPoolPath() { return poolPath; } - public boolean isUnmanagedPool() { - return isUnmanagedPool; + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPathForExplain() { + return poolPath == null ? "" : poolPath; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerOperation.java similarity index 81% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerOperation.java index 78934551f5..8ac02774b0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/AlterPoolAddTriggerOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.add; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; @@ -36,12 +36,11 @@ public AlterPoolAddTriggerOperation(DDLOperationContext context, AlterPoolAddTri @Override public int execute() throws HiveException, IOException { - if (!desc.isUnmanagedPool()) { - context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(), - false); + if (desc.getPoolPath() != null) { + context.getDb().createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), + desc.getPoolPath(), false); } else { - assert desc.getPoolPath() == null; - WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName()); + WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); // If we are dropping from unmanaged, unset the flag; and vice versa trigger.setIsInUnmanaged(true); context.getDb().alterWMTrigger(trigger); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java new file mode 100644 index 0000000000..66268fe556 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/add/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Add Trigger to Pool DDL operation. 
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.add; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerAnalyzer.java new file mode 100644 index 0000000000..cce878f270 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerAnalyzer.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.drop; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.WMUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for alter pool drop trigger commands. + */ +@DDLType(type=HiveParser.TOK_ALTER_POOL_DROP_TRIGGER) +public class AlterPoolDropTriggerAnalyzer extends BaseSemanticAnalyzer { + public AlterPoolDropTriggerAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode root) throws SemanticException { + if (root.getChildCount() != 3) { + throw new SemanticException("Invalid syntax for alter pool drop trigger: " + root.toStringTree()); + } + + String resourcePlanName = unescapeIdentifier(root.getChild(0).getText()); + + String poolPath = root.getChild(1).getType() == HiveParser.TOK_UNMANAGED ?
+ null : WMUtils.poolPath(root.getChild(1)); + String triggerName = unescapeIdentifier(root.getChild(2).getText()); + + AlterPoolDropTriggerDesc desc = new AlterPoolDropTriggerDesc(resourcePlanName, triggerName, poolPath); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + DDLUtils.addServiceOutput(conf, getOutputs()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerDesc.java similarity index 74% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerDesc.java index e6942fcaae..ad401468ff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.drop; import java.io.Serializable; @@ -31,21 +31,19 @@ public class AlterPoolDropTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 383046258694558029L; - private final String planName; + private final String resourcePlanName; private final String triggerName; private final String poolPath; - private final boolean isUnmanagedPool; - public AlterPoolDropTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) { - this.planName = planName; + public AlterPoolDropTriggerDesc(String resourcePlanName, String triggerName, String poolPath) { + this.resourcePlanName = resourcePlanName; this.triggerName = triggerName; this.poolPath = poolPath; - this.isUnmanagedPool = isUnmanagedPool; } - @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPlanName() { - return planName; + @Explain(displayName = "Resource plan name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; } @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -53,16 +51,12 @@ public String getTriggerName() { return triggerName; } - @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPoolPathForExplain() { - return isUnmanagedPool ? "" : poolPath; - } - public String getPoolPath() { return poolPath; } - public boolean isUnmanagedPool() { - return isUnmanagedPool; + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPathForExplain() { + return poolPath == null ? 
"" : poolPath; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerOperation.java similarity index 82% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerOperation.java index 4a8b404361..d3d353935f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/AlterPoolDropTriggerOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.workloadmanagement; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.drop; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; @@ -36,12 +36,11 @@ public AlterPoolDropTriggerOperation(DDLOperationContext context, AlterPoolDropT @Override public int execute() throws HiveException, IOException { - if (!desc.isUnmanagedPool()) { - context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(), - true); + if (desc.getPoolPath() != null) { + context.getDb().createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), + desc.getPoolPath(), true); } else { - assert desc.getPoolPath() == null; - WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName()); + WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); // If we are dropping from unmanaged, unset the flag; and vice versa trigger.setIsInUnmanaged(false); context.getDb().alterWMTrigger(trigger); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java new file mode 100644 index 0000000000..6a2b6b4ee1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/trigger/pool/drop/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Drop Trigger from Pool DDL operation. 
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement.trigger.pool.drop; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 023369fb36..1865d77000 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -54,12 +54,6 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; -import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; @@ -121,28 +115,12 @@ import org.apache.hadoop.hive.ql.ddl.view.AlterMaterializedViewRewriteDesc; import org.apache.hadoop.hive.ql.ddl.view.DropMaterializedViewDesc; import org.apache.hadoop.hive.ql.ddl.view.DropViewDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolAddTriggerDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolDropTriggerDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterResourcePlanDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMMappingDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMPoolDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMTriggerDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMMappingDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMPoolDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMTriggerDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMMappingDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMPoolDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMTriggerDesc; -import org.apache.hadoop.hive.ql.ddl.workloadmanagement.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.Entity.Type; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType; @@ -474,46 +452,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_CACHE_METADATA: analyzeCacheMetadata(ast); break; - case HiveParser.TOK_CREATE_RP: - analyzeCreateResourcePlan(ast); - break; - case HiveParser.TOK_SHOW_RP: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowResourcePlan(ast); - break; - case HiveParser.TOK_ALTER_RP: - analyzeAlterResourcePlan(ast); - break; - case HiveParser.TOK_DROP_RP: - analyzeDropResourcePlan(ast); - 
break; - case HiveParser.TOK_CREATE_TRIGGER: - analyzeCreateTrigger(ast); - break; - case HiveParser.TOK_ALTER_TRIGGER: - analyzeAlterTrigger(ast); - break; - case HiveParser.TOK_DROP_TRIGGER: - analyzeDropTrigger(ast); - break; - case HiveParser.TOK_CREATE_POOL: - analyzeCreatePool(ast); - break; - case HiveParser.TOK_ALTER_POOL: - analyzeAlterPool(ast); - break; - case HiveParser.TOK_DROP_POOL: - analyzeDropPool(ast); - break; - case HiveParser.TOK_CREATE_MAPPING: - analyzeCreateOrAlterMapping(ast, false); - break; - case HiveParser.TOK_ALTER_MAPPING: - analyzeCreateOrAlterMapping(ast, true); - break; - case HiveParser.TOK_DROP_MAPPING: - analyzeDropMapping(ast); - break; default: throw new SemanticException("Unsupported command: " + ast); } @@ -656,450 +594,6 @@ private int isPartitionValueContinuous(List partitionKeys, return counter; } - private void analyzeCreateResourcePlan(ASTNode ast) throws SemanticException { - if (ast.getChildCount() == 0) { - throw new SemanticException("Expected name in CREATE RESOURCE PLAN statement"); - } - String resourcePlanName = unescapeIdentifier(ast.getChild(0).getText()); - Integer queryParallelism = null; - String likeName = null; - boolean ifNotExists = false; - for (int i = 1; i < ast.getChildCount(); ++i) { - Tree child = ast.getChild(i); - switch (child.getType()) { - case HiveParser.TOK_QUERY_PARALLELISM: - // Note: later we may be able to set multiple things together (except LIKE). - if (queryParallelism == null && likeName == null) { - queryParallelism = Integer.parseInt(child.getChild(0).getText()); - } else { - throw new SemanticException("Conflicting create arguments " + ast.toStringTree()); - } - break; - case HiveParser.TOK_LIKERP: - if (queryParallelism == null && likeName == null) { - likeName = unescapeIdentifier(child.getChild(0).getText()); - } else { - throw new SemanticException("Conflicting create arguments " + ast.toStringTree()); - } - break; - case HiveParser.TOK_IFNOTEXISTS: - ifNotExists = true; - break; - default: throw new SemanticException("Invalid create arguments " + ast.toStringTree()); - } - } - CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism, likeName, ifNotExists); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { - String rpName = null; - if (ast.getChildCount() > 0) { - rpName = unescapeIdentifier(ast.getChild(0).getText()); - } - if (ast.getChildCount() > 1) { - throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement"); - } - ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile().toString()); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc))); - setFetchTask(createFetchTask(showResourcePlanDesc.getSchema())); - } - - private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { - if (ast.getChildCount() < 1) { - throw new SemanticException("Incorrect syntax"); - } - Tree nameOrGlobal = ast.getChild(0); - switch (nameOrGlobal.getType()) { - case HiveParser.TOK_ENABLE: - // This command exists solely to output this message. TODO: can we do it w/o an error? 
- throw new SemanticException("Activate a resource plan to enable workload management"); - case HiveParser.TOK_DISABLE: - WMNullableResourcePlan anyRp = new WMNullableResourcePlan(); - anyRp.setStatus(WMResourcePlanStatus.ENABLED); - AlterResourcePlanDesc desc = new AlterResourcePlanDesc(anyRp, null, false, false, true, false, null); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - return; - default: // Continue to handle changes to a specific plan. - } - if (ast.getChildCount() < 2) { - throw new SemanticException("Invalid syntax for ALTER RESOURCE PLAN statement"); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan(); - boolean isEnableActivate = false, isReplace = false; - boolean validate = false; - for (int i = 1; i < ast.getChildCount(); ++i) { - Tree child = ast.getChild(i); - switch (child.getType()) { - case HiveParser.TOK_VALIDATE: - validate = true; - break; - case HiveParser.TOK_ACTIVATE: - if (resourcePlan.getStatus() == WMResourcePlanStatus.ENABLED) { - isEnableActivate = true; - } - if (child.getChildCount() > 1) { - throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree()); - } else if (child.getChildCount() == 1) { - if (child.getChild(0).getType() != HiveParser.TOK_REPLACE) { - throw new SemanticException("Incorrect syntax " + ast.toStringTree()); - } - isReplace = true; - isEnableActivate = false; // Implied. - } - resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE); - break; - case HiveParser.TOK_ENABLE: - if (resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE) { - isEnableActivate = !isReplace; - } else { - resourcePlan.setStatus(WMResourcePlanStatus.ENABLED); - } - break; - case HiveParser.TOK_DISABLE: - resourcePlan.setStatus(WMResourcePlanStatus.DISABLED); - break; - case HiveParser.TOK_REPLACE: - isReplace = true; - if (child.getChildCount() > 1) { - throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree()); - } else if (child.getChildCount() == 1) { - // Replace is essentially renaming a plan to the name of an existing plan, with backup. 
- resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText())); - } else { - resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE); - } - break; - case HiveParser.TOK_QUERY_PARALLELISM: { - if (child.getChildCount() != 1) { - throw new SemanticException("Expected one argument"); - } - Tree val = child.getChild(0); - resourcePlan.setIsSetQueryParallelism(true); - if (val.getType() == HiveParser.TOK_NULL) { - resourcePlan.unsetQueryParallelism(); - } else { - resourcePlan.setQueryParallelism(Integer.parseInt(val.getText())); - } - break; - } - case HiveParser.TOK_DEFAULT_POOL: { - if (child.getChildCount() != 1) { - throw new SemanticException("Expected one argument"); - } - Tree val = child.getChild(0); - resourcePlan.setIsSetDefaultPoolPath(true); - if (val.getType() == HiveParser.TOK_NULL) { - resourcePlan.unsetDefaultPoolPath(); - } else { - resourcePlan.setDefaultPoolPath(poolPath(child.getChild(0))); - } - break; - } - case HiveParser.TOK_RENAME: - if (child.getChildCount() != 1) { - throw new SemanticException("Expected one argument"); - } - resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText())); - break; - default: - throw new SemanticException( - "Unexpected token in alter resource plan statement: " + child.getType()); - } - } - String resFile = null; - if (validate) { - ctx.setResFile(ctx.getLocalTmpPath()); - resFile = ctx.getResFile().toString(); - } - AlterResourcePlanDesc desc = new AlterResourcePlanDesc(resourcePlan, rpName, validate, isEnableActivate, false, - isReplace, resFile); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - if (validate) { - setFetchTask(createFetchTask(AlterResourcePlanDesc.SCHEMA)); - } - } - - private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException { - if (ast.getChildCount() == 0) { - throw new SemanticException("Expected name in DROP RESOURCE PLAN statement"); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - boolean ifExists = false; - for (int i = 1; i < ast.getChildCount(); ++i) { - Tree child = ast.getChild(i); - switch (child.getType()) { - case HiveParser.TOK_IFEXISTS: - ifExists = true; - break; - default: throw new SemanticException("Invalid create arguments " + ast.toStringTree()); - } - } - DropResourcePlanDesc desc = new DropResourcePlanDesc(rpName, ifExists); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeCreateTrigger(ASTNode ast) throws SemanticException { - if (ast.getChildCount() != 4) { - throw new SemanticException("Invalid syntax for create trigger statement"); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String triggerName = unescapeIdentifier(ast.getChild(1).getText()); - String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2)); - String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3)); - - WMTrigger trigger = new WMTrigger(rpName, triggerName); - trigger.setTriggerExpression(triggerExpression); - trigger.setActionExpression(actionExpression); - - CreateWMTriggerDesc desc = new CreateWMTriggerDesc(trigger); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private String buildTriggerExpression(ASTNode ast) throws SemanticException { - if (ast.getType() != HiveParser.TOK_TRIGGER_EXPRESSION || ast.getChildCount() == 0) { - throw new SemanticException("Invalid trigger expression."); - } - StringBuilder 
builder = new StringBuilder(); - for (int i = 0; i < ast.getChildCount(); ++i) { - builder.append(ast.getChild(i).getText()); // Don't strip quotes. - builder.append(' '); - } - builder.deleteCharAt(builder.length() - 1); - return builder.toString(); - } - - private String poolPath(Tree ast) { - StringBuilder builder = new StringBuilder(); - builder.append(unescapeIdentifier(ast.getText())); - for (int i = 0; i < ast.getChildCount(); ++i) { - // DOT is not affected - builder.append(unescapeIdentifier(ast.getChild(i).getText())); - } - return builder.toString(); - } - - private String buildTriggerActionExpression(ASTNode ast) throws SemanticException { - switch (ast.getType()) { - case HiveParser.KW_KILL: - return "KILL"; - case HiveParser.KW_MOVE: - if (ast.getChildCount() != 1) { - throw new SemanticException("Invalid move to clause in trigger action."); - } - String poolPath = poolPath(ast.getChild(0)); - return "MOVE TO " + poolPath; - default: - throw new SemanticException("Unknown token in action clause: " + ast.getType()); - } - } - - private void analyzeAlterTrigger(ASTNode ast) throws SemanticException { - if (ast.getChildCount() != 4) { - throw new SemanticException("Invalid syntax for alter trigger statement"); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String triggerName = unescapeIdentifier(ast.getChild(1).getText()); - String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2)); - String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3)); - - WMTrigger trigger = new WMTrigger(rpName, triggerName); - trigger.setTriggerExpression(triggerExpression); - trigger.setActionExpression(actionExpression); - - AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeDropTrigger(ASTNode ast) throws SemanticException { - if (ast.getChildCount() != 2) { - throw new SemanticException("Invalid syntax for drop trigger."); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String triggerName = unescapeIdentifier(ast.getChild(1).getText()); - - DropWMTriggerDesc desc = new DropWMTriggerDesc(rpName, triggerName); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeCreatePool(ASTNode ast) throws SemanticException { - // TODO: allow defaults for e.g. scheduling policy. 
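// As the checks further below show, CREATE POOL treats ALLOC_FRACTION and QUERY_PARALLELISM as mandatory, while SCHEDULING_POLICY stays optional and, when present, is validated through MetaStoreUtils.isValidSchedulingPolicy().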
- if (ast.getChildCount() < 3) { - throw new SemanticException("Expected more arguments: " + ast.toStringTree()); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String poolPath = poolPath(ast.getChild(1)); - WMPool pool = new WMPool(rpName, poolPath); - for (int i = 2; i < ast.getChildCount(); ++i) { - Tree child = ast.getChild(i); - if (child.getChildCount() != 1) { - throw new SemanticException("Expected 1 parameter for: " + child.getText()); - } - String param = child.getChild(0).getText(); - switch (child.getType()) { - case HiveParser.TOK_ALLOC_FRACTION: - pool.setAllocFraction(Double.parseDouble(param)); - break; - case HiveParser.TOK_QUERY_PARALLELISM: - pool.setQueryParallelism(Integer.parseInt(param)); - break; - case HiveParser.TOK_SCHEDULING_POLICY: - String schedulingPolicyStr = PlanUtils.stripQuotes(param); - if (!MetaStoreUtils.isValidSchedulingPolicy(schedulingPolicyStr)) { - throw new SemanticException("Invalid scheduling policy " + schedulingPolicyStr); - } - pool.setSchedulingPolicy(schedulingPolicyStr); - break; - case HiveParser.TOK_PATH: - throw new SemanticException("Invalid parameter path in create pool"); - } - } - if (!pool.isSetAllocFraction()) { - throw new SemanticException("alloc_fraction should be specified for a pool"); - } - if (!pool.isSetQueryParallelism()) { - throw new SemanticException("query_parallelism should be specified for a pool"); - } - CreateWMPoolDesc desc = new CreateWMPoolDesc(pool); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeAlterPool(ASTNode ast) throws SemanticException { - if (ast.getChildCount() < 3) { - throw new SemanticException("Invalid syntax for alter pool: " + ast.toStringTree()); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - Tree poolTarget = ast.getChild(1); - - boolean isUnmanagedPool = false; - String poolPath = null; - if (poolTarget.getType() == HiveParser.TOK_UNMANAGED) { - isUnmanagedPool = true; - } else { - poolPath = poolPath(ast.getChild(1)); - } - - WMNullablePool poolChanges = null; - boolean hasTrigger = false; - for (int i = 2; i < ast.getChildCount(); ++i) { - Tree child = ast.getChild(i); - if (child.getChildCount() != 1) { - throw new SemanticException("Invalid syntax in alter pool; expected parameter."); - } - Tree param = child.getChild(0); - if (child.getType() == HiveParser.TOK_ADD_TRIGGER - || child.getType() == HiveParser.TOK_DROP_TRIGGER) { - hasTrigger = true; - boolean drop = child.getType() == HiveParser.TOK_DROP_TRIGGER; - String triggerName = unescapeIdentifier(param.getText()); - if (drop) { - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new AlterPoolDropTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); - } else { - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new AlterPoolAddTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); - } - } else { - if (isUnmanagedPool) { - throw new SemanticException("Cannot alter the unmanaged pool"); - } - if (poolChanges == null) { - poolChanges = new WMNullablePool(rpName, null); - } - switch (child.getType()) { - case HiveParser.TOK_ALLOC_FRACTION: - poolChanges.setAllocFraction(Double.parseDouble(param.getText())); - break; - case HiveParser.TOK_QUERY_PARALLELISM: - poolChanges.setQueryParallelism(Integer.parseInt(param.getText())); - break; - case HiveParser.TOK_SCHEDULING_POLICY: - poolChanges.setIsSetSchedulingPolicy(true); - if (param.getType() !=
HiveParser.TOK_NULL) { - poolChanges.setSchedulingPolicy(PlanUtils.stripQuotes(param.getText())); - } - break; - case HiveParser.TOK_PATH: - poolChanges.setPoolPath(poolPath(param)); - break; - default: throw new SemanticException("Incorrect alter syntax: " + child.toStringTree()); - } - } - } - - if (poolChanges != null || hasTrigger) { - addServiceOutput(); - } - if (poolChanges != null) { - if (!poolChanges.isSetPoolPath()) { - poolChanges.setPoolPath(poolPath); - } - AlterWMPoolDesc ddlDesc = new AlterWMPoolDesc(poolChanges, poolPath); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), ddlDesc))); - } - } - - private void analyzeDropPool(ASTNode ast) throws SemanticException { - if (ast.getChildCount() != 2) { - throw new SemanticException("Invalid syntax for drop pool."); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String poolPath = poolPath(ast.getChild(1)); - - DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws SemanticException { - if (ast.getChildCount() < 4) { - throw new SemanticException("Invalid syntax for create or alter mapping."); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String entityType = ast.getChild(1).getText(); - String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText()); - WMMapping mapping = new WMMapping(rpName, entityType, entityName); - Tree dest = ast.getChild(3); - if (dest.getType() != HiveParser.TOK_UNMANAGED) { - mapping.setPoolPath(poolPath(dest)); - } // Null path => unmanaged - if (ast.getChildCount() == 5) { - mapping.setOrdering(Integer.valueOf(ast.getChild(4).getText())); - } - - org.apache.hadoop.hive.ql.ddl.DDLDesc desc = null; - if (update) { - desc = new AlterWMMappingDesc(mapping); - } else { - desc = new CreateWMMappingDesc(mapping); - } - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - - private void analyzeDropMapping(ASTNode ast) throws SemanticException { - if (ast.getChildCount() != 3) { - throw new SemanticException("Invalid syntax for drop mapping."); - } - String rpName = unescapeIdentifier(ast.getChild(0).getText()); - String entityType = ast.getChild(1).getText(); - String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText()); - - DropWMMappingDesc desc = new DropWMMappingDesc(new WMMapping(rpName, entityType, entityName)); - addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - } - private void analyzeDropTable(ASTNode ast) throws SemanticException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); @@ -2566,13 +2060,6 @@ private void analyzeLockTable(ASTNode ast) ctx.setNeedLockMgr(true); } - private void addServiceOutput() throws SemanticException { - String hs2Hostname = getHS2Host(); - if (hs2Hostname != null) { - outputs.add(new WriteEntity(hs2Hostname, Type.SERVICE_NAME)); - } - } - private String getHS2Host() throws SemanticException { if (SessionState.get().isHiveServerQuery()) { return SessionState.get().getHiveServer2Host(); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 6e7ed4d83e..d56c8c6e9b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g 
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -417,9 +417,14 @@ TOK_BLOCKING; TOK_KILL_QUERY; TOK_CREATE_RP; TOK_SHOW_RP; -TOK_ALTER_RP; +TOK_ALTER_RP_ENABLE; +TOK_ALTER_RP_DISABLE; +TOK_ALTER_RP_RENAME; +TOK_ALTER_RP_SET; +TOK_ALTER_RP_UNSET; +TOK_ALTER_RP_REPLACE; +TOK_ALTER_RP_VALIDATE; TOK_DROP_RP; -TOK_VALIDATE; TOK_ACTIVATE; TOK_QUERY_PARALLELISM; TOK_RENAME; @@ -430,6 +435,8 @@ TOK_DROP_TRIGGER; TOK_TRIGGER_EXPRESSION; TOK_CREATE_POOL; TOK_ALTER_POOL; +TOK_ALTER_POOL_ADD_TRIGGER; +TOK_ALTER_POOL_DROP_TRIGGER; TOK_DROP_POOL; TOK_ALLOC_FRACTION; TOK_SCHEDULING_POLICY; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g index 0479c78f7b..0460c37f0f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g @@ -76,11 +76,11 @@ rpAssignList ; rpUnassign -@init { gParent.pushMsg("rpAssign", state); } +@init { gParent.pushMsg("rpUnassign", state); } @after { gParent.popMsg(state); } : ( - (KW_QUERY_PARALLELISM) -> ^(TOK_QUERY_PARALLELISM TOK_NULL) - | (KW_DEFAULT KW_POOL) -> ^(TOK_DEFAULT_POOL TOK_NULL) + (KW_QUERY_PARALLELISM) -> ^(TOK_QUERY_PARALLELISM) + | (KW_DEFAULT KW_POOL) -> ^(TOK_DEFAULT_POOL) ) ; @@ -111,12 +111,12 @@ alterResourcePlanStatement @init { gParent.pushMsg("alter resource plan statement", state); } @after { gParent.popMsg(state); } : KW_ALTER KW_RESOURCE KW_PLAN name=identifier ( - (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE)) - | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE)) - | (KW_SET rpAssignList -> ^(TOK_ALTER_RP $name rpAssignList)) - | (KW_UNSET rpUnassignList -> ^(TOK_ALTER_RP $name rpUnassignList)) - | (KW_RENAME KW_TO newName=identifier -> ^(TOK_ALTER_RP $name ^(TOK_RENAME $newName))) - | ((activate enable? | enable activate?) -> ^(TOK_ALTER_RP $name activate? enable?)) + (KW_VALIDATE -> ^(TOK_ALTER_RP_VALIDATE $name)) + | (KW_DISABLE -> ^(TOK_ALTER_RP_DISABLE $name)) + | (KW_SET rpAssignList -> ^(TOK_ALTER_RP_SET $name rpAssignList)) + | (KW_UNSET rpUnassignList -> ^(TOK_ALTER_RP_UNSET $name rpUnassignList)) + | (KW_RENAME KW_TO newName=identifier -> ^(TOK_ALTER_RP_RENAME $name $newName)) + | ((activate enable? | enable activate?) -> ^(TOK_ALTER_RP_ENABLE $name activate? enable?)) ) ; @@ -125,15 +125,16 @@ alterResourcePlanStatement globalWmStatement @init { gParent.pushMsg("global WM statement", state); } @after { gParent.popMsg(state); } - : (enable | disable) KW_WORKLOAD KW_MANAGEMENT -> ^(TOK_ALTER_RP enable? disable?) 
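// The replacement below splits the old combined rule into two explicit alternatives, so ENABLE WORKLOAD MANAGEMENT and DISABLE WORKLOAD MANAGEMENT each produce a dedicated root token (TOK_ALTER_RP_ENABLE / TOK_ALTER_RP_DISABLE) instead of sharing TOK_ALTER_RP with optional children.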
+ : KW_ENABLE KW_WORKLOAD KW_MANAGEMENT -> ^(TOK_ALTER_RP_ENABLE) + | KW_DISABLE KW_WORKLOAD KW_MANAGEMENT -> ^(TOK_ALTER_RP_DISABLE) ; replaceResourcePlanStatement @init { gParent.pushMsg("replace resource plan statement", state); } @after { gParent.popMsg(state); } : KW_REPLACE ( - (KW_ACTIVE KW_RESOURCE KW_PLAN KW_WITH src=identifier -> ^(TOK_ALTER_RP $src TOK_REPLACE)) - | (KW_RESOURCE KW_PLAN dest=identifier KW_WITH src=identifier -> ^(TOK_ALTER_RP $src ^(TOK_REPLACE $dest))) + (KW_ACTIVE KW_RESOURCE KW_PLAN KW_WITH src=identifier -> ^(TOK_ALTER_RP_REPLACE $src)) + | (KW_RESOURCE KW_PLAN dest=identifier KW_WITH src=identifier -> ^(TOK_ALTER_RP_REPLACE $src $dest)) ) ; @@ -216,10 +217,10 @@ alterTriggerStatement : KW_ALTER KW_TRIGGER rpName=identifier DOT triggerName=identifier ( (KW_WHEN triggerExpression KW_DO triggerActionExpression -> ^(TOK_ALTER_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression)) - | (KW_ADD KW_TO KW_POOL poolName=poolPath -> ^(TOK_ALTER_POOL $rpName $poolName ^(TOK_ADD_TRIGGER $triggerName))) - | (KW_DROP KW_FROM KW_POOL poolName=poolPath -> ^(TOK_ALTER_POOL $rpName $poolName ^(TOK_DROP_TRIGGER $triggerName))) - | (KW_ADD KW_TO KW_UNMANAGED -> ^(TOK_ALTER_POOL $rpName TOK_UNMANAGED ^(TOK_ADD_TRIGGER $triggerName))) - | (KW_DROP KW_FROM KW_UNMANAGED -> ^(TOK_ALTER_POOL $rpName TOK_UNMANAGED ^(TOK_DROP_TRIGGER $triggerName))) + | (KW_ADD KW_TO KW_POOL poolName=poolPath -> ^(TOK_ALTER_POOL_ADD_TRIGGER $rpName $poolName $triggerName)) + | (KW_DROP KW_FROM KW_POOL poolName=poolPath -> ^(TOK_ALTER_POOL_DROP_TRIGGER $rpName $poolName $triggerName)) + | (KW_ADD KW_TO KW_UNMANAGED -> ^(TOK_ALTER_POOL_ADD_TRIGGER $rpName TOK_UNMANAGED $triggerName)) + | (KW_DROP KW_FROM KW_UNMANAGED -> ^(TOK_ALTER_POOL_DROP_TRIGGER $rpName TOK_UNMANAGED $triggerName)) ) ; @@ -262,10 +263,8 @@ alterPoolStatement : KW_ALTER KW_POOL rpName=identifier DOT poolPath ( (KW_SET poolAssignList -> ^(TOK_ALTER_POOL $rpName poolPath poolAssignList)) | (KW_UNSET KW_SCHEDULING_POLICY -> ^(TOK_ALTER_POOL $rpName poolPath ^(TOK_SCHEDULING_POLICY TOK_NULL))) - | (KW_ADD KW_TRIGGER triggerName=identifier - -> ^(TOK_ALTER_POOL $rpName poolPath ^(TOK_ADD_TRIGGER $triggerName))) - | (KW_DROP KW_TRIGGER triggerName=identifier - -> ^(TOK_ALTER_POOL $rpName poolPath ^(TOK_DROP_TRIGGER $triggerName))) + | (KW_ADD KW_TRIGGER triggerName=identifier -> ^(TOK_ALTER_POOL_ADD_TRIGGER $rpName poolPath $triggerName)) + | (KW_DROP KW_TRIGGER triggerName=identifier -> ^(TOK_ALTER_POOL_DROP_TRIGGER $rpName poolPath $triggerName)) ) ; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 4f95c51b55..45dec5a8e3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -141,19 +141,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_UNLOCKTABLE: case HiveParser.TOK_TRUNCATETABLE: case HiveParser.TOK_CACHE_METADATA: - case HiveParser.TOK_CREATE_RP: - case HiveParser.TOK_SHOW_RP: - case HiveParser.TOK_ALTER_RP: - case HiveParser.TOK_DROP_RP: - case HiveParser.TOK_CREATE_TRIGGER: - case HiveParser.TOK_ALTER_TRIGGER: - case HiveParser.TOK_DROP_TRIGGER: - case HiveParser.TOK_CREATE_POOL: - case HiveParser.TOK_ALTER_POOL: - case HiveParser.TOK_DROP_POOL: - case HiveParser.TOK_CREATE_MAPPING: - case HiveParser.TOK_ALTER_MAPPING: - case 
HiveParser.TOK_DROP_MAPPING: return new DDLSemanticAnalyzer(queryState); case HiveParser.TOK_ANALYZE: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index f1ab99e069..193eff94a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -186,13 +186,16 @@ KILL_QUERY("KILL QUERY", HiveParser.TOK_KILL_QUERY, null, null), CREATE_RESOURCEPLAN("CREATE RESOURCEPLAN", HiveParser.TOK_CREATE_RP, null, null, false, false), SHOW_RESOURCEPLAN("SHOW RESOURCEPLAN", HiveParser.TOK_SHOW_RP, null, null, false, false), - ALTER_RESOURCEPLAN("ALTER RESOURCEPLAN", HiveParser.TOK_ALTER_RP, null, null, false, false), + ALTER_RESOURCEPLAN("ALTER RESOURCEPLAN", new int[] {HiveParser.TOK_ALTER_RP_VALIDATE, HiveParser.TOK_ALTER_RP_RENAME, + HiveParser.TOK_ALTER_RP_SET, HiveParser.TOK_ALTER_RP_UNSET, HiveParser.TOK_ALTER_RP_ENABLE, + HiveParser.TOK_ALTER_RP_DISABLE, HiveParser.TOK_ALTER_RP_REPLACE}, null, null, false, false), DROP_RESOURCEPLAN("DROP RESOURCEPLAN", HiveParser.TOK_DROP_RP, null, null, false, false), CREATE_TRIGGER("CREATE TRIGGER", HiveParser.TOK_CREATE_TRIGGER, null, null, false, false), ALTER_TRIGGER("ALTER TRIGGER", HiveParser.TOK_ALTER_TRIGGER, null, null, false, false), DROP_TRIGGER("DROP TRIGGER", HiveParser.TOK_DROP_TRIGGER, null, null, false, false), CREATE_POOL("CREATE POOL", HiveParser.TOK_CREATE_POOL, null, null, false, false), - ALTER_POOL("ALTER POOL", HiveParser.TOK_ALTER_POOL, null, null, false, false), + ALTER_POOL("ALTER POOL", new int[] {HiveParser.TOK_ALTER_POOL, HiveParser.TOK_ALTER_POOL_ADD_TRIGGER, + HiveParser.TOK_ALTER_POOL_DROP_TRIGGER}, null, null, false, false), DROP_POOL("DROP POOL", HiveParser.TOK_DROP_POOL, null, null, false, false), CREATE_MAPPING("CREATE MAPPING", HiveParser.TOK_CREATE_MAPPING, null, null, false, false), ALTER_MAPPING("ALTER MAPPING", HiveParser.TOK_ALTER_MAPPING, null, null, false, false), diff --git ql/src/test/queries/clientpositive/resourceplan.q ql/src/test/queries/clientpositive/resourceplan.q index 8bc5697a6c..0466b91e89 100644 --- ql/src/test/queries/clientpositive/resourceplan.q +++ ql/src/test/queries/clientpositive/resourceplan.q @@ -22,6 +22,10 @@ source ../../metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql; SHOW RESOURCE PLANS; SELECT * FROM SYS.WM_RESOURCEPLANS; +-- Show how ENABLE WORKLOAD MANAGEMENT does not work :) +EXPLAIN ENABLE WORKLOAD MANAGEMENT; +ENABLE WORKLOAD MANAGEMENT; + -- Create and show plan_1. CREATE RESOURCE PLAN plan_1; EXPLAIN SHOW RESOURCE PLANS; @@ -115,6 +119,7 @@ ALTER RESOURCE PLAN plan_3 DISABLE; SELECT * FROM SYS.WM_RESOURCEPLANS; -- DISABLE WM - ok. +EXPLAIN DISABLE WORKLOAD MANAGEMENT; DISABLE WORKLOAD MANAGEMENT; SELECT * FROM SYS.WM_RESOURCEPLANS; @@ -262,6 +267,7 @@ CREATE POOL plan_2.default.c2 WITH QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75; -- Cannot activate c1 + c2 = 1.0 +EXPLAIN ALTER RESOURCE PLAN plan_2 VALIDATE; ALTER RESOURCE PLAN plan_2 VALIDATE; ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE; @@ -316,7 +322,16 @@ SELECT * FROM SYS.WM_POOLS; SELECT * FROM SYS.WM_RESOURCEPLANS; -- Changed default pool, now it should work.
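-- The SET/UNSET pair below first points DEFAULT POOL at `table`.pool with QUERY_PARALLELISM=2, then unsets both (so SYS.WM_RESOURCEPLANS shows NULLs), and finally restores DEFAULT POOL with QUERY_PARALLELISM=1 ahead of the DROP POOL step that follows.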
-ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool; +EXPLAIN ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2; +ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +EXPLAIN ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM; +ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=1; + DROP POOL `table`.default; SELECT * FROM SYS.WM_POOLS; @@ -439,6 +454,7 @@ SELECT * FROM SYS.WM_TRIGGERS; SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS; SELECT * FROM SYS.WM_MAPPINGS; +EXPLAIN REPLACE RESOURCE PLAN plan_4a WITH plan_4b; REPLACE RESOURCE PLAN plan_4a WITH plan_4b; SELECT * FROM SYS.WM_RESOURCEPLANS; SELECT * FROM SYS.WM_POOLS; @@ -447,6 +463,7 @@ REPLACE ACTIVE RESOURCE PLAN WITH plan_4a; SELECT * FROM SYS.WM_RESOURCEPLANS; CREATE RESOURCE PLAN plan_4a LIKE plan_4; CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0; +EXPLAIN ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE; ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE; SELECT * FROM SYS.WM_RESOURCEPLANS; SELECT * FROM SYS.WM_POOLS; diff --git ql/src/test/results/clientpositive/llap/resourceplan.q.out ql/src/test/results/clientpositive/llap/resourceplan.q.out index a3b6956eb5..5752d4097d 100644 --- ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -3588,6 +3588,19 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS POSTHOOK: type: QUERY POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### +Activate a resource plan to enable workload management! +PREHOOK: query: EXPLAIN ENABLE WORKLOAD MANAGEMENT +PREHOOK: type: ALTER RESOURCEPLAN +POSTHOOK: query: EXPLAIN ENABLE WORKLOAD MANAGEMENT +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + +STAGE PLANS: +Activate a resource plan to enable workload management! 
+PREHOOK: query: ENABLE WORKLOAD MANAGEMENT +PREHOOK: type: ALTER RESOURCEPLAN +POSTHOOK: query: ENABLE WORKLOAD MANAGEMENT +POSTHOOK: type: ALTER RESOURCEPLAN PREHOOK: query: CREATE RESOURCE PLAN plan_1 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3630,7 +3643,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Show Resource plans - resourcePlanName: plan_1 + Resource plan name: plan_1 Stage: Stage-1 Fetch Operator @@ -3665,9 +3678,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Create ResourcePlan - planName: plan_2 - queryParallelism: 5 + Create Resource plan + Query parallelism: 5 + Resource plan name: plan_2 PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 PREHOOK: type: CREATE RESOURCEPLAN @@ -3684,10 +3697,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_2 - Resource plan changed fields: - shouldValidate: false + Alter Resource plan Set + Query parallelism: 10 + Resource plan name: plan_2 PREHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 PREHOOK: type: ALTER RESOURCEPLAN @@ -3798,10 +3810,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false + Alter Resource plan Set + Default pool: default1 + Query parallelism: 30 + Resource plan name: plan_3 PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 PREHOOK: type: ALTER RESOURCEPLAN @@ -3827,10 +3839,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false + Enable Resource plan + Resource plan name: plan_3 + Enable: true PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE PREHOOK: type: ALTER RESOURCEPLAN @@ -3847,10 +3858,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false + Rename Resource plan + New resource plan name: plan_4 + Resource plan name: plan_3 PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 PREHOOK: type: ALTER RESOURCEPLAN @@ -3977,6 +3987,18 @@ POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### plan_2 default DISABLED 10 default plan_3 default ACTIVE NULL default +PREHOOK: query: EXPLAIN DISABLE WORKLOAD MANAGEMENT +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DISABLE WORKLOAD MANAGEMENT +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Disable Resource plan + PREHOOK: query: DISABLE WORKLOAD MANAGEMENT PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -4082,8 +4104,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Resource plans - resourcePlanName: plan_2 + Drop Resource plan + Resource plan name: plan_2 PREHOOK: query: DROP RESOURCE PLAN plan_2 PREHOOK: type: DROP RESOURCEPLAN @@ -4180,7 +4202,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Create WM Trigger - trigger: + Action expression: KILL + Resource plan name: plan_1 + Trigger expression: BYTES_READ > '10kb' + Trigger name: trigger_1 PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL PREHOOK: type: CREATE TRIGGER @@ -4244,7 +4269,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Alter WM Trigger - trigger: + Action expression: KILL 
+ Resource plan name: plan_1 + Trigger expression: BYTES_READ > '1GB' + Trigger name: trigger_1 PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL PREHOOK: type: ALTER TRIGGER @@ -4272,8 +4300,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Drop WM Trigger - resourcePlanName: plan_1 - triggerName: trigger_1 + Resource plan name: plan_1 + Trigger name: trigger_1 PREHOOK: query: DROP TRIGGER plan_1.trigger_1 PREHOOK: type: DROP TRIGGER @@ -4435,7 +4463,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Create Pool - pool: + Alloc fraction: 1.0 + Pool path: default + Query parallelism: 5 + Resource plan name: plan_1 + Scheduling policy: default PREHOOK: query: CREATE POOL plan_1.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' @@ -4459,7 +4491,11 @@ POSTHOOK: Input: sys@wm_pools plan_1 default default 1.0 4 NULL plan_2 default default 1.0 5 NULL table default default 1.0 4 NULL -FAILED: SemanticException Invalid scheduling policy invalid +PREHOOK: query: CREATE POOL plan_2.default.c1 WITH + ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='invalid' +PREHOOK: type: CREATE POOL +PREHOOK: Output: dummyHostnameForTest +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid scheduling policy invalid PREHOOK: query: CREATE POOL plan_2.default.c1 WITH ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair' PREHOOK: type: CREATE POOL @@ -4474,6 +4510,26 @@ PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75 POSTHOOK: type: CREATE POOL +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 VALIDATE +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 VALIDATE +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Validate Resource Plan + Resource plan name: plan_2 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -4495,8 +4551,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Alter Pool - pool: - poolPath: default.c2 + Alloc fraction: 0.7 + Pool path: default.c2 + Query parallelism: 1 + Resource plan name: plan_2 PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 PREHOOK: type: ALTER POOL @@ -4589,7 +4647,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Drop WM Pool - poolName: plan_2 + Pool path: default + Resource plan name: plan_2 PREHOOK: query: DROP POOL plan_2.default PREHOOK: type: DROP POOL @@ -4738,10 +4797,72 @@ POSTHOOK: Input: sys@wm_resourceplans plan_1 default ACTIVE NULL default plan_2 default DISABLED 10 def table default DISABLED 1 default -PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2 +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + 
Alter Resource plan Set + Default pool: table.pool + Query parallelism: 2 + Resource plan name: table + +PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=2 +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 default ACTIVE NULL default +plan_2 default DISABLED 10 def +table default DISABLED 2 table.pool +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan name: table + Unset Default Pool: true + Unset Query parallelism: true + +PREHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL, QUERY_PARALLELISM +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 default ACTIVE NULL default +plan_2 default DISABLED 10 def +table default DISABLED NULL NULL +PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=1 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool, QUERY_PARALLELISM=1 POSTHOOK: type: ALTER RESOURCEPLAN PREHOOK: query: DROP POOL `table`.default PREHOOK: type: DROP POOL @@ -4791,8 +4912,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Create Trigger to pool mappings - resourcePlanName: plan_2 Pool path: def.c1 + Resource plan name: plan_2 Trigger name: trigger_1 PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 @@ -4927,8 +5048,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Drop Trigger to pool mappings - resourcePlanName: plan_2 Pool path: def.c1 + Resource plan name: plan_2 Trigger name: trigger_1 PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 @@ -4971,7 +5092,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Create Mapping - mapping: + Entity name: user1 + Entity type: USER + Pool path: def + Resource plan name: plan_2 PREHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def PREHOOK: type: CREATE MAPPING @@ -5009,7 +5133,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Create Mapping - mapping: + Entity name: group3 + Entity type: GROUP + Ordering: 1 + Resource plan name: plan_2 PREHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 PREHOOK: type: CREATE MAPPING @@ -5027,7 +5154,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Alter Mapping - mapping: + Entity 
name: user1 + Entity type: USER + Resource plan name: plan_2 PREHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED PREHOOK: type: ALTER MAPPING @@ -5081,7 +5210,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Drop mapping - mapping: + Entity name: user2 + Entity type: USER + Resource plan name: plan_2 PREHOOK: query: DROP USER MAPPING "user2" in plan_2 PREHOOK: type: DROP MAPPING @@ -5099,7 +5230,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Drop mapping - mapping: + Entity name: group2 + Entity type: GROUP + Resource plan name: plan_2 PREHOOK: query: DROP GROUP MAPPING "group2" in plan_2 PREHOOK: type: DROP MAPPING @@ -5325,6 +5458,20 @@ POSTHOOK: Input: sys@wm_mappings #### A masked pattern was here #### plan_4a default USER user1 pool1 0 plan_4b default USER user1 pool1 0 +PREHOOK: query: EXPLAIN REPLACE RESOURCE PLAN plan_4a WITH plan_4b +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN REPLACE RESOURCE PLAN plan_4a WITH plan_4b +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Replace Resource plan + Destination Resource plan name: plan_4a + Resource plan name: plan_4b + PREHOOK: query: REPLACE RESOURCE PLAN plan_4a WITH plan_4b PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -5404,6 +5551,22 @@ PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 POSTHOOK: type: CREATE POOL +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Enable Resource plan + Resource plan name: plan_4a + Activate: true + Enable: true + Replace: true + PREHOOK: query: ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest diff --git ql/src/test/results/clientpositive/resourceplan.q.out ql/src/test/results/clientpositive/resourceplan.q.out deleted file mode 100644 index bf79d75e8b..0000000000 --- ql/src/test/results/clientpositive/resourceplan.q.out +++ /dev/null @@ -1,5440 +0,0 @@ -PREHOOK: query: show grant user hive_test_user -PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user -POSTHOOK: type: SHOW_GRANT -default alltypesorc hive_test_user USER DELETE true -1 hive_test_user -default alltypesorc hive_test_user USER INSERT true -1 hive_test_user -default alltypesorc hive_test_user USER SELECT true -1 hive_test_user -default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user -default alltypesparquet hive_test_user USER DELETE true -1 hive_test_user -default alltypesparquet hive_test_user USER INSERT true -1 hive_test_user -default alltypesparquet hive_test_user USER SELECT true -1 hive_test_user -default alltypesparquet hive_test_user USER UPDATE true -1 hive_test_user -default cbo_t1 hive_test_user USER DELETE true -1 hive_test_user -default cbo_t1 hive_test_user USER INSERT true -1 hive_test_user -default cbo_t1 hive_test_user USER SELECT true -1 hive_test_user -default cbo_t1 hive_test_user USER UPDATE true -1 hive_test_user -default cbo_t2 hive_test_user USER DELETE true -1 hive_test_user -default cbo_t2 
hive_test_user USER INSERT true -1 hive_test_user -default cbo_t2 hive_test_user USER SELECT true -1 hive_test_user -default cbo_t2 hive_test_user USER UPDATE true -1 hive_test_user -default cbo_t3 hive_test_user USER DELETE true -1 hive_test_user -default cbo_t3 hive_test_user USER INSERT true -1 hive_test_user -default cbo_t3 hive_test_user USER SELECT true -1 hive_test_user -default cbo_t3 hive_test_user USER UPDATE true -1 hive_test_user -default lineitem hive_test_user USER DELETE true -1 hive_test_user -default lineitem hive_test_user USER INSERT true -1 hive_test_user -default lineitem hive_test_user USER SELECT true -1 hive_test_user -default lineitem hive_test_user USER UPDATE true -1 hive_test_user -default part hive_test_user USER DELETE true -1 hive_test_user -default part hive_test_user USER INSERT true -1 hive_test_user -default part hive_test_user USER SELECT true -1 hive_test_user -default part hive_test_user USER UPDATE true -1 hive_test_user -default src hive_test_user USER DELETE true -1 hive_test_user -default src hive_test_user USER INSERT true -1 hive_test_user -default src hive_test_user USER SELECT true -1 hive_test_user -default src hive_test_user USER UPDATE true -1 hive_test_user -default src1 hive_test_user USER DELETE true -1 hive_test_user -default src1 hive_test_user USER INSERT true -1 hive_test_user -default src1 hive_test_user USER SELECT true -1 hive_test_user -default src1 hive_test_user USER UPDATE true -1 hive_test_user -default src_cbo hive_test_user USER DELETE true -1 hive_test_user -default src_cbo hive_test_user USER INSERT true -1 hive_test_user -default src_cbo hive_test_user USER SELECT true -1 hive_test_user -default src_cbo hive_test_user USER UPDATE true -1 hive_test_user -default src_json hive_test_user USER DELETE true -1 hive_test_user -default src_json hive_test_user USER INSERT true -1 hive_test_user -default src_json hive_test_user USER SELECT true -1 hive_test_user -default src_json hive_test_user USER UPDATE true -1 hive_test_user -default src_sequencefile hive_test_user USER DELETE true -1 hive_test_user -default src_sequencefile hive_test_user USER INSERT true -1 hive_test_user -default src_sequencefile hive_test_user USER SELECT true -1 hive_test_user -default src_sequencefile hive_test_user USER UPDATE true -1 hive_test_user -default src_thrift hive_test_user USER DELETE true -1 hive_test_user -default src_thrift hive_test_user USER INSERT true -1 hive_test_user -default src_thrift hive_test_user USER SELECT true -1 hive_test_user -default src_thrift hive_test_user USER UPDATE true -1 hive_test_user -default srcbucket hive_test_user USER DELETE true -1 hive_test_user -default srcbucket hive_test_user USER INSERT true -1 hive_test_user -default srcbucket hive_test_user USER SELECT true -1 hive_test_user -default srcbucket hive_test_user USER UPDATE true -1 hive_test_user -default srcbucket2 hive_test_user USER DELETE true -1 hive_test_user -default srcbucket2 hive_test_user USER INSERT true -1 hive_test_user -default srcbucket2 hive_test_user USER SELECT true -1 hive_test_user -default srcbucket2 hive_test_user USER UPDATE true -1 hive_test_user -default srcpart hive_test_user USER DELETE true -1 hive_test_user -default srcpart hive_test_user USER INSERT true -1 hive_test_user -default srcpart hive_test_user USER SELECT true -1 hive_test_user -default srcpart hive_test_user USER UPDATE true -1 hive_test_user -PREHOOK: query: CREATE DATABASE IF NOT EXISTS SYS -PREHOOK: type: CREATEDATABASE -PREHOOK: Output: database:SYS -POSTHOOK: 
query: CREATE DATABASE IF NOT EXISTS SYS -POSTHOOK: type: CREATEDATABASE -POSTHOOK: Output: database:SYS -PREHOOK: query: USE SYS -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:sys -POSTHOOK: query: USE SYS -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `BUCKETING_COLS` ( - `SD_ID` bigint, - `BUCKET_COL_NAME` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"SD_ID\", - \"BUCKET_COL_NAME\", - \"INTEGER_IDX\" -FROM - \"BUCKETING_COLS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@BUCKETING_COLS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `BUCKETING_COLS` ( - `SD_ID` bigint, - `BUCKET_COL_NAME` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"SD_ID\", - \"BUCKET_COL_NAME\", - \"INTEGER_IDX\" -FROM - \"BUCKETING_COLS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@BUCKETING_COLS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `CDS` ( - `CD_ID` bigint, - CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"CD_ID\" -FROM - \"CDS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@CDS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `CDS` ( - `CD_ID` bigint, - CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"CD_ID\" -FROM - \"CDS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@CDS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COLUMNS_V2` ( - `CD_ID` bigint, - `COMMENT` string, - `COLUMN_NAME` string, - `TYPE_NAME` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"CD_ID\", - \"COMMENT\", - \"COLUMN_NAME\", - \"TYPE_NAME\", - \"INTEGER_IDX\" -FROM - \"COLUMNS_V2\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@COLUMNS_V2 -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COLUMNS_V2` ( - `CD_ID` bigint, - `COMMENT` string, - `COLUMN_NAME` string, - `TYPE_NAME` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"CD_ID\", - \"COMMENT\", - \"COLUMN_NAME\", - \"TYPE_NAME\", - \"INTEGER_IDX\" -FROM - \"COLUMNS_V2\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@COLUMNS_V2 -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DATABASE_PARAMS` ( - `DB_ID` bigint, - `PARAM_KEY` string, - `PARAM_VALUE` string, - CONSTRAINT 
`SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_ID\", - \"PARAM_KEY\", - \"PARAM_VALUE\" -FROM - \"DATABASE_PARAMS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@DATABASE_PARAMS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DATABASE_PARAMS` ( - `DB_ID` bigint, - `PARAM_KEY` string, - `PARAM_VALUE` string, - CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_ID\", - \"PARAM_KEY\", - \"PARAM_VALUE\" -FROM - \"DATABASE_PARAMS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@DATABASE_PARAMS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( - `DB_ID` bigint, - `DB_LOCATION_URI` string, - `NAME` string, - `OWNER_NAME` string, - `OWNER_TYPE` string, - CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_ID\", - \"DB_LOCATION_URI\", - \"NAME\", - \"OWNER_NAME\", - \"OWNER_TYPE\" -FROM - \"DBS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@DBS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( - `DB_ID` bigint, - `DB_LOCATION_URI` string, - `NAME` string, - `OWNER_NAME` string, - `OWNER_TYPE` string, - CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_ID\", - \"DB_LOCATION_URI\", - \"NAME\", - \"OWNER_NAME\", - \"OWNER_TYPE\" -FROM - \"DBS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@DBS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_PRIVS` ( - `DB_GRANT_ID` bigint, - `CREATE_TIME` int, - `DB_ID` bigint, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `DB_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_GRANT_ID\", - \"CREATE_TIME\", - \"DB_ID\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"DB_PRIV\", - \"AUTHORIZER\" -FROM - \"DB_PRIVS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@DB_PRIVS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_PRIVS` ( - `DB_GRANT_ID` bigint, - `CREATE_TIME` int, - `DB_ID` bigint, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `DB_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"DB_GRANT_ID\", - \"CREATE_TIME\", - \"DB_ID\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - 
\"DB_PRIV\", - \"AUTHORIZER\" -FROM - \"DB_PRIVS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@DB_PRIVS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( - `USER_GRANT_ID` bigint, - `CREATE_TIME` int, - `GRANT_OPTION` string, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `USER_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"USER_GRANT_ID\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"USER_PRIV\", - \"AUTHORIZER\" -FROM - \"GLOBAL_PRIVS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@GLOBAL_PRIVS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( - `USER_GRANT_ID` bigint, - `CREATE_TIME` int, - `GRANT_OPTION` string, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `USER_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"USER_GRANT_ID\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"USER_PRIV\", - \"AUTHORIZER\" -FROM - \"GLOBAL_PRIVS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@GLOBAL_PRIVS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITIONS` ( - `PART_ID` bigint, - `CREATE_TIME` int, - `LAST_ACCESS_TIME` int, - `PART_NAME` string, - `SD_ID` bigint, - `TBL_ID` bigint, - CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"CREATE_TIME\", - \"LAST_ACCESS_TIME\", - \"PART_NAME\", - \"SD_ID\", - \"TBL_ID\" -FROM - \"PARTITIONS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PARTITIONS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITIONS` ( - `PART_ID` bigint, - `CREATE_TIME` int, - `LAST_ACCESS_TIME` int, - `PART_NAME` string, - `SD_ID` bigint, - `TBL_ID` bigint, - CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"CREATE_TIME\", - \"LAST_ACCESS_TIME\", - \"PART_NAME\", - \"SD_ID\", - \"TBL_ID\" -FROM - \"PARTITIONS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PARTITIONS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEYS` ( - `TBL_ID` bigint, - `PKEY_COMMENT` string, - `PKEY_NAME` string, - `PKEY_TYPE` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"TBL_ID\", - \"PKEY_COMMENT\", - \"PKEY_NAME\", - \"PKEY_TYPE\", - \"INTEGER_IDX\" -FROM - \"PARTITION_KEYS\"" 
-) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PARTITION_KEYS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEYS` ( - `TBL_ID` bigint, - `PKEY_COMMENT` string, - `PKEY_NAME` string, - `PKEY_TYPE` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"TBL_ID\", - \"PKEY_COMMENT\", - \"PKEY_NAME\", - \"PKEY_TYPE\", - \"INTEGER_IDX\" -FROM - \"PARTITION_KEYS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PARTITION_KEYS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( - `PART_ID` bigint, - `PART_KEY_VAL` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"PART_KEY_VAL\", - \"INTEGER_IDX\" -FROM - \"PARTITION_KEY_VALS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PARTITION_KEY_VALS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( - `PART_ID` bigint, - `PART_KEY_VAL` string, - `INTEGER_IDX` int, - CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"PART_KEY_VAL\", - \"INTEGER_IDX\" -FROM - \"PARTITION_KEY_VALS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PARTITION_KEY_VALS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_PARAMS` ( - `PART_ID` bigint, - `PARAM_KEY` string, - `PARAM_VALUE` string, - CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"PARAM_KEY\", - \"PARAM_VALUE\" -FROM - \"PARTITION_PARAMS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PARTITION_PARAMS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_PARAMS` ( - `PART_ID` bigint, - `PARAM_KEY` string, - `PARAM_VALUE` string, - CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_ID\", - \"PARAM_KEY\", - \"PARAM_VALUE\" -FROM - \"PARTITION_PARAMS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PARTITION_PARAMS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_PRIVS` ( - `PART_COLUMN_GRANT_ID` bigint, - `COLUMN_NAME` string, - `CREATE_TIME` int, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PART_ID` bigint, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `PART_COL_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT 
- \"PART_COLUMN_GRANT_ID\", - \"COLUMN_NAME\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PART_ID\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"PART_COL_PRIV\", - \"AUTHORIZER\" -FROM - \"PART_COL_PRIVS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PART_COL_PRIVS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_PRIVS` ( - `PART_COLUMN_GRANT_ID` bigint, - `COLUMN_NAME` string, - `CREATE_TIME` int, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PART_ID` bigint, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `PART_COL_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_COLUMN_GRANT_ID\", - \"COLUMN_NAME\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PART_ID\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"PART_COL_PRIV\", - \"AUTHORIZER\" -FROM - \"PART_COL_PRIVS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PART_COL_PRIVS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_PRIVS` ( - `PART_GRANT_ID` bigint, - `CREATE_TIME` int, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PART_ID` bigint, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `PART_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_GRANT_ID\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PART_ID\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"PART_PRIV\", - \"AUTHORIZER\" -FROM - \"PART_PRIVS\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@PART_PRIVS -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_PRIVS` ( - `PART_GRANT_ID` bigint, - `CREATE_TIME` int, - `GRANT_OPTION` int, - `GRANTOR` string, - `GRANTOR_TYPE` string, - `PART_ID` bigint, - `PRINCIPAL_NAME` string, - `PRINCIPAL_TYPE` string, - `PART_PRIV` string, - `AUTHORIZER` string, - CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"PART_GRANT_ID\", - \"CREATE_TIME\", - \"GRANT_OPTION\", - \"GRANTOR\", - \"GRANTOR_TYPE\", - \"PART_ID\", - \"PRINCIPAL_NAME\", - \"PRINCIPAL_TYPE\", - \"PART_PRIV\", - \"AUTHORIZER\" -FROM - \"PART_PRIVS\"" -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: SYS@PART_PRIVS -POSTHOOK: Output: database:sys -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLES` ( - `ROLE_ID` bigint, - `CREATE_TIME` int, - `OWNER_NAME` string, - `ROLE_NAME` string, - CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE -) -STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' -TBLPROPERTIES ( -"hive.sql.database.type" = "METASTORE", -"hive.sql.query" = -"SELECT - \"ROLE_ID\", - \"CREATE_TIME\", - \"OWNER_NAME\", - \"ROLE_NAME\" -FROM - \"ROLES\"" -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: SYS@ROLES -PREHOOK: Output: database:sys -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLES` ( - `ROLE_ID` bigint, - 
`CREATE_TIME` int,
- `OWNER_NAME` string,
- `ROLE_NAME` string,
- CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"ROLE_ID\",
- \"CREATE_TIME\",
- \"OWNER_NAME\",
- \"ROLE_NAME\"
-FROM
- \"ROLES\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@ROLES
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLE_MAP` (
- `ROLE_GRANT_ID` bigint,
- `ADD_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `ROLE_ID` bigint,
- CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"ROLE_GRANT_ID\",
- \"ADD_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"ROLE_ID\"
-FROM
- \"ROLE_MAP\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@ROLE_MAP
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLE_MAP` (
- `ROLE_GRANT_ID` bigint,
- `ADD_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `ROLE_ID` bigint,
- CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"ROLE_GRANT_ID\",
- \"ADD_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"ROLE_ID\"
-FROM
- \"ROLE_MAP\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@ROLE_MAP
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SDS` (
- `SD_ID` bigint,
- `CD_ID` bigint,
- `INPUT_FORMAT` string,
- `IS_COMPRESSED` boolean,
- `IS_STOREDASSUBDIRECTORIES` boolean,
- `LOCATION` string,
- `NUM_BUCKETS` int,
- `OUTPUT_FORMAT` string,
- `SERDE_ID` bigint,
- CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"CD_ID\",
- \"INPUT_FORMAT\",
- \"IS_COMPRESSED\",
- \"IS_STOREDASSUBDIRECTORIES\",
- \"LOCATION\",
- \"NUM_BUCKETS\",
- \"OUTPUT_FORMAT\",
- \"SERDE_ID\"
-FROM
- \"SDS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SDS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SDS` (
- `SD_ID` bigint,
- `CD_ID` bigint,
- `INPUT_FORMAT` string,
- `IS_COMPRESSED` boolean,
- `IS_STOREDASSUBDIRECTORIES` boolean,
- `LOCATION` string,
- `NUM_BUCKETS` int,
- `OUTPUT_FORMAT` string,
- `SERDE_ID` bigint,
- CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"CD_ID\",
- \"INPUT_FORMAT\",
- \"IS_COMPRESSED\",
- \"IS_STOREDASSUBDIRECTORIES\",
- \"LOCATION\",
- \"NUM_BUCKETS\",
- \"OUTPUT_FORMAT\",
- \"SERDE_ID\"
-FROM
- \"SDS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SDS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SD_PARAMS` (
- `SD_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"SD_PARAMS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SD_PARAMS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SD_PARAMS` (
- `SD_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"SD_PARAMS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SD_PARAMS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
- `SEQUENCE_NAME` string,
- `NEXT_VAL` bigint,
- CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SEQUENCE_NAME\",
- \"NEXT_VAL\"
-FROM
- \"SEQUENCE_TABLE\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SEQUENCE_TABLE
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
- `SEQUENCE_NAME` string,
- `NEXT_VAL` bigint,
- CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SEQUENCE_NAME\",
- \"NEXT_VAL\"
-FROM
- \"SEQUENCE_TABLE\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SEQUENCE_TABLE
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDES` (
- `SERDE_ID` bigint,
- `NAME` string,
- `SLIB` string,
- CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SERDE_ID\",
- \"NAME\",
- \"SLIB\"
-FROM
- \"SERDES\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SERDES
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDES` (
- `SERDE_ID` bigint,
- `NAME` string,
- `SLIB` string,
- CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SERDE_ID\",
- \"NAME\",
- \"SLIB\"
-FROM
- \"SERDES\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SERDES
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDE_PARAMS` (
- `SERDE_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SERDE_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"SERDE_PARAMS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SERDE_PARAMS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDE_PARAMS` (
- `SERDE_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SERDE_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"SERDE_PARAMS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SERDE_PARAMS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
- `SD_ID` bigint,
- `SKEWED_COL_NAME` string,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"SKEWED_COL_NAME\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_COL_NAMES\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SKEWED_COL_NAMES
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
- `SD_ID` bigint,
- `SKEWED_COL_NAME` string,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"SKEWED_COL_NAME\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_COL_NAMES\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SKEWED_COL_NAMES
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
- `SD_ID` bigint,
- `STRING_LIST_ID_KID` bigint,
- `LOCATION` string,
- CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"STRING_LIST_ID_KID\",
- \"LOCATION\"
-FROM
- \"SKEWED_COL_VALUE_LOC_MAP\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
- `SD_ID` bigint,
- `STRING_LIST_ID_KID` bigint,
- `LOCATION` string,
- CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"STRING_LIST_ID_KID\",
- \"LOCATION\"
-FROM
- \"SKEWED_COL_VALUE_LOC_MAP\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
- `STRING_LIST_ID` bigint,
- CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"STRING_LIST_ID\"
-FROM
- \"SKEWED_STRING_LIST\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SKEWED_STRING_LIST
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
- `STRING_LIST_ID` bigint,
- CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"STRING_LIST_ID\"
-FROM
- \"SKEWED_STRING_LIST\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SKEWED_STRING_LIST
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
- `STRING_LIST_ID` bigint,
- `STRING_LIST_VALUE` string,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"STRING_LIST_ID\",
- \"STRING_LIST_VALUE\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_STRING_LIST_VALUES\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
- `STRING_LIST_ID` bigint,
- `STRING_LIST_VALUE` string,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"STRING_LIST_ID\",
- \"STRING_LIST_VALUE\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_STRING_LIST_VALUES\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_VALUES` (
- `SD_ID_OID` bigint,
- `STRING_LIST_ID_EID` bigint,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID_OID\",
- \"STRING_LIST_ID_EID\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_VALUES\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SKEWED_VALUES
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_VALUES` (
- `SD_ID_OID` bigint,
- `STRING_LIST_ID_EID` bigint,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID_OID\",
- \"STRING_LIST_ID_EID\",
- \"INTEGER_IDX\"
-FROM
- \"SKEWED_VALUES\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SKEWED_VALUES
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SORT_COLS` (
- `SD_ID` bigint,
- `COLUMN_NAME` string,
- `ORDER` int,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"COLUMN_NAME\",
- \"ORDER\",
- \"INTEGER_IDX\"
-FROM
- \"SORT_COLS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@SORT_COLS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SORT_COLS` (
- `SD_ID` bigint,
- `COLUMN_NAME` string,
- `ORDER` int,
- `INTEGER_IDX` int,
- CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"SD_ID\",
- \"COLUMN_NAME\",
- \"ORDER\",
- \"INTEGER_IDX\"
-FROM
- \"SORT_COLS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@SORT_COLS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TABLE_PARAMS` (
- `TBL_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"TABLE_PARAMS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@TABLE_PARAMS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TABLE_PARAMS` (
- `TBL_ID` bigint,
- `PARAM_KEY` string,
- `PARAM_VALUE` string,
- CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_ID\",
- \"PARAM_KEY\",
- \"PARAM_VALUE\"
-FROM
- \"TABLE_PARAMS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@TABLE_PARAMS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBLS` (
- `TBL_ID` bigint,
- `CREATE_TIME` int,
- `DB_ID` bigint,
- `LAST_ACCESS_TIME` int,
- `OWNER` string,
- `RETENTION` int,
- `SD_ID` bigint,
- `TBL_NAME` string,
- `TBL_TYPE` string,
- `VIEW_EXPANDED_TEXT` string,
- `VIEW_ORIGINAL_TEXT` string,
- `IS_REWRITE_ENABLED` boolean,
- CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_ID\",
- \"CREATE_TIME\",
- \"DB_ID\",
- \"LAST_ACCESS_TIME\",
- \"OWNER\",
- \"RETENTION\",
- \"SD_ID\",
- \"TBL_NAME\",
- \"TBL_TYPE\",
- \"VIEW_EXPANDED_TEXT\",
- \"VIEW_ORIGINAL_TEXT\",
- \"IS_REWRITE_ENABLED\"
-FROM \"TBLS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@TBLS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBLS` (
- `TBL_ID` bigint,
- `CREATE_TIME` int,
- `DB_ID` bigint,
- `LAST_ACCESS_TIME` int,
- `OWNER` string,
- `RETENTION` int,
- `SD_ID` bigint,
- `TBL_NAME` string,
- `TBL_TYPE` string,
- `VIEW_EXPANDED_TEXT` string,
- `VIEW_ORIGINAL_TEXT` string,
- `IS_REWRITE_ENABLED` boolean,
- CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_ID\",
- \"CREATE_TIME\",
- \"DB_ID\",
- \"LAST_ACCESS_TIME\",
- \"OWNER\",
- \"RETENTION\",
- \"SD_ID\",
- \"TBL_NAME\",
- \"TBL_TYPE\",
- \"VIEW_EXPANDED_TEXT\",
- \"VIEW_ORIGINAL_TEXT\",
- \"IS_REWRITE_ENABLED\"
-FROM \"TBLS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@TBLS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
- `MV_CREATION_METADATA_ID` bigint,
- `DB_NAME` string,
- `TBL_NAME` string,
- `TXN_LIST` string,
- CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY (`MV_CREATION_METADATA_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"MV_CREATION_METADATA_ID\",
- \"DB_NAME\",
- \"TBL_NAME\",
- \"TXN_LIST\"
-FROM \"MV_CREATION_METADATA\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@MV_CREATION_METADATA
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
- `MV_CREATION_METADATA_ID` bigint,
- `DB_NAME` string,
- `TBL_NAME` string,
- `TXN_LIST` string,
- CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY (`MV_CREATION_METADATA_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"MV_CREATION_METADATA_ID\",
- \"DB_NAME\",
- \"TBL_NAME\",
- \"TXN_LIST\"
-FROM \"MV_CREATION_METADATA\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@MV_CREATION_METADATA
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_TABLES_USED` (
- `MV_CREATION_METADATA_ID` bigint,
- `TBL_ID` bigint
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"MV_CREATION_METADATA_ID\",
- \"TBL_ID\"
-FROM \"MV_TABLES_USED\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@MV_TABLES_USED
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_TABLES_USED` (
- `MV_CREATION_METADATA_ID` bigint,
- `TBL_ID` bigint
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"MV_CREATION_METADATA_ID\",
- \"TBL_ID\"
-FROM \"MV_TABLES_USED\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@MV_TABLES_USED
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
- `TBL_COLUMN_GRANT_ID` bigint,
- `COLUMN_NAME` string,
- `CREATE_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `TBL_COL_PRIV` string,
- `TBL_ID` bigint,
- `AUTHORIZER` string,
- CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_COLUMN_GRANT_ID\",
- \"COLUMN_NAME\",
- \"CREATE_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"TBL_COL_PRIV\",
- \"TBL_ID\",
- \"AUTHORIZER\"
-FROM
- \"TBL_COL_PRIVS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@TBL_COL_PRIVS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
- `TBL_COLUMN_GRANT_ID` bigint,
- `COLUMN_NAME` string,
- `CREATE_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `TBL_COL_PRIV` string,
- `TBL_ID` bigint,
- `AUTHORIZER` string,
- CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_COLUMN_GRANT_ID\",
- \"COLUMN_NAME\",
- \"CREATE_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"TBL_COL_PRIV\",
- \"TBL_ID\",
- \"AUTHORIZER\"
-FROM
- \"TBL_COL_PRIVS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@TBL_COL_PRIVS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_PRIVS` (
- `TBL_GRANT_ID` bigint,
- `CREATE_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `TBL_PRIV` string,
- `TBL_ID` bigint,
- `AUTHORIZER` string,
- CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_GRANT_ID\",
- \"CREATE_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"TBL_PRIV\",
- \"TBL_ID\",
- \"AUTHORIZER\"
-FROM
- \"TBL_PRIVS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@TBL_PRIVS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_PRIVS` (
- `TBL_GRANT_ID` bigint,
- `CREATE_TIME` int,
- `GRANT_OPTION` int,
- `GRANTOR` string,
- `GRANTOR_TYPE` string,
- `PRINCIPAL_NAME` string,
- `PRINCIPAL_TYPE` string,
- `TBL_PRIV` string,
- `TBL_ID` bigint,
- `AUTHORIZER` string,
- CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"TBL_GRANT_ID\",
- \"CREATE_TIME\",
- \"GRANT_OPTION\",
- \"GRANTOR\",
- \"GRANTOR_TYPE\",
- \"PRINCIPAL_NAME\",
- \"PRINCIPAL_TYPE\",
- \"TBL_PRIV\",
- \"TBL_ID\",
- \"AUTHORIZER\"
-FROM
- \"TBL_PRIVS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@TBL_PRIVS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
- `COLUMN_NAME` string,
- `COLUMN_TYPE` string,
- `TBL_ID` bigint,
- `LONG_LOW_VALUE` bigint,
- `LONG_HIGH_VALUE` bigint,
- `DOUBLE_HIGH_VALUE` double,
- `DOUBLE_LOW_VALUE` double,
- `BIG_DECIMAL_LOW_VALUE` string,
- `BIG_DECIMAL_HIGH_VALUE` string,
- `NUM_NULLS` bigint,
- `NUM_DISTINCTS` bigint,
- `AVG_COL_LEN` double,
- `MAX_COL_LEN` bigint,
- `NUM_TRUES` bigint,
- `NUM_FALSES` bigint,
- `LAST_ANALYZED` bigint,
- CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
- \"COLUMN_NAME\",
- \"COLUMN_TYPE\",
- \"TBL_ID\",
- \"LONG_LOW_VALUE\",
- \"LONG_HIGH_VALUE\",
- \"DOUBLE_HIGH_VALUE\",
- \"DOUBLE_LOW_VALUE\",
- \"BIG_DECIMAL_LOW_VALUE\",
- \"BIG_DECIMAL_HIGH_VALUE\",
- \"NUM_NULLS\",
- \"NUM_DISTINCTS\",
- \"AVG_COL_LEN\",
- \"MAX_COL_LEN\",
- \"NUM_TRUES\",
- \"NUM_FALSES\",
- \"LAST_ANALYZED\"
-FROM
- \"TAB_COL_STATS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@TAB_COL_STATS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
- `COLUMN_NAME` string,
- `COLUMN_TYPE` string,
- `TBL_ID` bigint,
- `LONG_LOW_VALUE` bigint,
- `LONG_HIGH_VALUE` bigint,
- `DOUBLE_HIGH_VALUE` double,
- `DOUBLE_LOW_VALUE` double,
- `BIG_DECIMAL_LOW_VALUE` string,
- `BIG_DECIMAL_HIGH_VALUE` string,
- `NUM_NULLS` bigint,
- `NUM_DISTINCTS` bigint,
- `AVG_COL_LEN` double,
- `MAX_COL_LEN` bigint,
- `NUM_TRUES` bigint,
- `NUM_FALSES` bigint,
- `LAST_ANALYZED` bigint,
- CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
- \"COLUMN_NAME\",
- \"COLUMN_TYPE\",
- \"TBL_ID\",
- \"LONG_LOW_VALUE\",
- \"LONG_HIGH_VALUE\",
- \"DOUBLE_HIGH_VALUE\",
- \"DOUBLE_LOW_VALUE\",
- \"BIG_DECIMAL_LOW_VALUE\",
- \"BIG_DECIMAL_HIGH_VALUE\",
- \"NUM_NULLS\",
- \"NUM_DISTINCTS\",
- \"AVG_COL_LEN\",
- \"MAX_COL_LEN\",
- \"NUM_TRUES\",
- \"NUM_FALSES\",
- \"LAST_ANALYZED\"
-FROM
- \"TAB_COL_STATS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@TAB_COL_STATS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
- `PARTITION_NAME` string,
- `COLUMN_NAME` string,
- `COLUMN_TYPE` string,
- `PART_ID` bigint,
- `LONG_LOW_VALUE` bigint,
- `LONG_HIGH_VALUE` bigint,
- `DOUBLE_HIGH_VALUE` double,
- `DOUBLE_LOW_VALUE` double,
- `BIG_DECIMAL_LOW_VALUE` string,
- `BIG_DECIMAL_HIGH_VALUE` string,
- `NUM_NULLS` bigint,
- `NUM_DISTINCTS` bigint,
- `AVG_COL_LEN` double,
- `MAX_COL_LEN` bigint,
- `NUM_TRUES` bigint,
- `NUM_FALSES` bigint,
- `LAST_ANALYZED` bigint,
- CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
- \"PARTITION_NAME\",
- \"COLUMN_NAME\",
- \"COLUMN_TYPE\",
- \"PART_ID\",
- \"LONG_LOW_VALUE\",
- \"LONG_HIGH_VALUE\",
- \"DOUBLE_HIGH_VALUE\",
- \"DOUBLE_LOW_VALUE\",
- \"BIG_DECIMAL_LOW_VALUE\",
- \"BIG_DECIMAL_HIGH_VALUE\",
- \"NUM_NULLS\",
- \"NUM_DISTINCTS\",
- \"AVG_COL_LEN\",
- \"MAX_COL_LEN\",
- \"NUM_TRUES\",
- \"NUM_FALSES\",
- \"LAST_ANALYZED\"
-FROM
- \"PART_COL_STATS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@PART_COL_STATS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
- `PARTITION_NAME` string,
- `COLUMN_NAME` string,
- `COLUMN_TYPE` string,
- `PART_ID` bigint,
- `LONG_LOW_VALUE` bigint,
- `LONG_HIGH_VALUE` bigint,
- `DOUBLE_HIGH_VALUE` double,
- `DOUBLE_LOW_VALUE` double,
- `BIG_DECIMAL_LOW_VALUE` string,
- `BIG_DECIMAL_HIGH_VALUE` string,
- `NUM_NULLS` bigint,
- `NUM_DISTINCTS` bigint,
- `AVG_COL_LEN` double,
- `MAX_COL_LEN` bigint,
- `NUM_TRUES` bigint,
- `NUM_FALSES` bigint,
- `LAST_ANALYZED` bigint,
- CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
- \"PARTITION_NAME\",
- \"COLUMN_NAME\",
- \"COLUMN_TYPE\",
- \"PART_ID\",
- \"LONG_LOW_VALUE\",
- \"LONG_HIGH_VALUE\",
- \"DOUBLE_HIGH_VALUE\",
- \"DOUBLE_LOW_VALUE\",
- \"BIG_DECIMAL_LOW_VALUE\",
- \"BIG_DECIMAL_HIGH_VALUE\",
- \"NUM_NULLS\",
- \"NUM_DISTINCTS\",
- \"AVG_COL_LEN\",
- \"MAX_COL_LEN\",
- \"NUM_TRUES\",
- \"NUM_FALSES\",
- \"LAST_ANALYZED\"
-FROM
- \"PART_COL_STATS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@PART_COL_STATS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`,
- 'Hive release version 4.0.0' AS `VERSION_COMMENT`
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: SYS@VERSION
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`,
- 'Hive release version 4.0.0' AS `VERSION_COMMENT`
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: SYS@VERSION
-POSTHOOK: Output: database:sys
-POSTHOOK: Lineage: VERSION.schema_version SIMPLE []
-POSTHOOK: Lineage: VERSION.ver_id SIMPLE []
-POSTHOOK: Lineage: VERSION.version_comment SIMPLE []
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` (
- `VER_ID` BIGINT,
- `SCHEMA_VERSION` string,
- `VERSION_COMMENT` string,
- CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"VER_ID\",
- \"SCHEMA_VERSION\",
- \"VERSION_COMMENT\"
-FROM
- \"VERSION\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@DB_VERSION
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` (
- `VER_ID` BIGINT,
- `SCHEMA_VERSION` string,
- `VERSION_COMMENT` string,
- CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"VER_ID\",
- \"SCHEMA_VERSION\",
- \"VERSION_COMMENT\"
-FROM
- \"VERSION\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@DB_VERSION
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `FUNCS` (
- `FUNC_ID` bigint,
- `CLASS_NAME` string,
- `CREATE_TIME` int,
- `DB_ID` bigint,
- `FUNC_NAME` string,
- `FUNC_TYPE` int,
- `OWNER_NAME` string,
- `OWNER_TYPE` string,
- CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"FUNC_ID\",
- \"CLASS_NAME\",
- \"CREATE_TIME\",
- \"DB_ID\",
- \"FUNC_NAME\",
- \"FUNC_TYPE\",
- \"OWNER_NAME\",
- \"OWNER_TYPE\"
-FROM
- \"FUNCS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@FUNCS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `FUNCS` (
- `FUNC_ID` bigint,
- `CLASS_NAME` string,
- `CREATE_TIME` int,
- `DB_ID` bigint,
- `FUNC_NAME` string,
- `FUNC_TYPE` int,
- `OWNER_NAME` string,
- `OWNER_TYPE` string,
- CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"FUNC_ID\",
- \"CLASS_NAME\",
- \"CREATE_TIME\",
- \"DB_ID\",
- \"FUNC_NAME\",
- \"FUNC_TYPE\",
- \"OWNER_NAME\",
- \"OWNER_TYPE\"
-FROM
- \"FUNCS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@FUNCS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
-(
- `CHILD_CD_ID` bigint,
- `CHILD_INTEGER_IDX` int,
- `CHILD_TBL_ID` bigint,
- `PARENT_CD_ID` bigint,
- `PARENT_INTEGER_IDX` int,
- `PARENT_TBL_ID` bigint,
- `POSITION` bigint,
- `CONSTRAINT_NAME` string,
- `CONSTRAINT_TYPE` string,
- `UPDATE_RULE` string,
- `DELETE_RULE` string,
- `ENABLE_VALIDATE_RELY` int,
- `DEFAULT_VALUE` string,
- CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CHILD_CD_ID\",
- \"CHILD_INTEGER_IDX\",
- \"CHILD_TBL_ID\",
- \"PARENT_CD_ID\",
- \"PARENT_INTEGER_IDX\",
- \"PARENT_TBL_ID\",
- \"POSITION\",
- \"CONSTRAINT_NAME\",
- \"CONSTRAINT_TYPE\",
- \"UPDATE_RULE\",
- \"DELETE_RULE\",
- \"ENABLE_VALIDATE_RELY\",
- \"DEFAULT_VALUE\"
-FROM
- \"KEY_CONSTRAINTS\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@KEY_CONSTRAINTS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
-(
- `CHILD_CD_ID` bigint,
- `CHILD_INTEGER_IDX` int,
- `CHILD_TBL_ID` bigint,
- `PARENT_CD_ID` bigint,
- `PARENT_INTEGER_IDX` int,
- `PARENT_TBL_ID` bigint,
- `POSITION` bigint,
- `CONSTRAINT_NAME` string,
- `CONSTRAINT_TYPE` string,
- `UPDATE_RULE` string,
- `DELETE_RULE` string,
- `ENABLE_VALIDATE_RELY` int,
- `DEFAULT_VALUE` string,
- CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"CHILD_CD_ID\",
- \"CHILD_INTEGER_IDX\",
- \"CHILD_TBL_ID\",
- \"PARENT_CD_ID\",
- \"PARENT_INTEGER_IDX\",
- \"PARENT_TBL_ID\",
- \"POSITION\",
- \"CONSTRAINT_NAME\",
- \"CONSTRAINT_TYPE\",
- \"UPDATE_RULE\",
- \"DELETE_RULE\",
- \"ENABLE_VALIDATE_RELY\",
- \"DEFAULT_VALUE\"
-FROM
- \"KEY_CONSTRAINTS\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@KEY_CONSTRAINTS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE OR REPLACE VIEW `TABLE_STATS_VIEW` AS
-SELECT
- `TBL_ID`,
- max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE,
- max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES,
- max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS,
- max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE,
- max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE,
-#### A masked pattern was here ####
-FROM `TABLE_PARAMS` GROUP BY `TBL_ID`
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@table_params
-PREHOOK: Output: SYS@TABLE_STATS_VIEW
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE OR REPLACE VIEW `TABLE_STATS_VIEW` AS
-SELECT
- `TBL_ID`,
- max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE,
- max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES,
- max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS,
- max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE,
- max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE,
-#### A masked pattern was here ####
-FROM `TABLE_PARAMS` GROUP BY `TBL_ID`
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@table_params
-POSTHOOK: Output: SYS@TABLE_STATS_VIEW
-POSTHOOK: Output: database:sys
-POSTHOOK: Lineage: TABLE_STATS_VIEW.column_stats_accurate EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.num_files EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.num_rows EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.raw_data_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.tbl_id SIMPLE [(table_params)table_params.FieldSchema(name:tbl_id, type:bigint, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.total_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_STATS_VIEW.transient_last_ddl_time EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-PREHOOK: query: CREATE OR REPLACE VIEW `PARTITION_STATS_VIEW` AS
-SELECT
- `PART_ID`,
- max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE,
- max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES,
- max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS,
- max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE,
- max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE,
-#### A masked pattern was here ####
-FROM `PARTITION_PARAMS` GROUP BY `PART_ID`
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@partition_params
-PREHOOK: Output: SYS@PARTITION_STATS_VIEW
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE OR REPLACE VIEW `PARTITION_STATS_VIEW` AS
-SELECT
- `PART_ID`,
- max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE,
- max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES,
- max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS,
- max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE,
- max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE,
-#### A masked pattern was here ####
-FROM `PARTITION_PARAMS` GROUP BY `PART_ID`
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@partition_params
-POSTHOOK: Output: SYS@PARTITION_STATS_VIEW
-POSTHOOK: Output: database:sys
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.column_stats_accurate EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_files EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_rows EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.part_id SIMPLE [(partition_params)partition_params.FieldSchema(name:part_id, type:bigint, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.raw_data_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.total_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: PARTITION_STATS_VIEW.transient_last_ddl_time EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ]
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
- `NAME` string,
- `NS` string,
- `STATUS` string,
- `QUERY_PARALLELISM` int,
- `DEFAULT_POOL_PATH` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"STATUS\",
- \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
- \"WM_POOL\".\"PATH\"
-FROM
- \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@WM_RESOURCEPLANS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
- `NAME` string,
- `NS` string,
- `STATUS` string,
- `QUERY_PARALLELISM` int,
- `DEFAULT_POOL_PATH` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"STATUS\",
- \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
- \"WM_POOL\".\"PATH\"
-FROM
- \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@WM_RESOURCEPLANS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` (
- `RP_NAME` string,
- `NS` string,
- `NAME` string,
- `TRIGGER_EXPRESSION` string,
- `ACTION_EXPRESSION` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- r.\"NAME\" AS RP_NAME,
- case when r.\"NS\" is null then 'default' else r.\"NS\" end,
- t.\"NAME\" AS NAME,
- \"TRIGGER_EXPRESSION\",
- \"ACTION_EXPRESSION\"
-FROM
- \"WM_TRIGGER\" t
-JOIN
- \"WM_RESOURCEPLAN\" r
-ON
- t.\"RP_ID\" = r.\"RP_ID\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@WM_TRIGGERS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` (
- `RP_NAME` string,
- `NS` string,
- `NAME` string,
- `TRIGGER_EXPRESSION` string,
- `ACTION_EXPRESSION` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- r.\"NAME\" AS RP_NAME,
- case when r.\"NS\" is null then 'default' else r.\"NS\" end,
- t.\"NAME\" AS NAME,
- \"TRIGGER_EXPRESSION\",
- \"ACTION_EXPRESSION\"
-FROM
- \"WM_TRIGGER\" t
-JOIN
- \"WM_RESOURCEPLAN\" r
-ON
- t.\"RP_ID\" = r.\"RP_ID\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@WM_TRIGGERS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` (
- `RP_NAME` string,
- `NS` string,
- `PATH` string,
- `ALLOC_FRACTION` double,
- `QUERY_PARALLELISM` int,
- `SCHEDULING_POLICY` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"WM_POOL\".\"PATH\",
- \"WM_POOL\".\"ALLOC_FRACTION\",
- \"WM_POOL\".\"QUERY_PARALLELISM\",
- \"WM_POOL\".\"SCHEDULING_POLICY\"
-FROM
- \"WM_POOL\"
-JOIN
- \"WM_RESOURCEPLAN\"
-ON
- \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\""
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@WM_POOLS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` (
- `RP_NAME` string,
- `NS` string,
- `PATH` string,
- `ALLOC_FRACTION` double,
- `QUERY_PARALLELISM` int,
- `SCHEDULING_POLICY` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"WM_POOL\".\"PATH\",
- \"WM_POOL\".\"ALLOC_FRACTION\",
- \"WM_POOL\".\"QUERY_PARALLELISM\",
- \"WM_POOL\".\"SCHEDULING_POLICY\"
-FROM
- \"WM_POOL\"
-JOIN
- \"WM_RESOURCEPLAN\"
-ON
- \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\""
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@WM_POOLS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
- `RP_NAME` string,
- `NS` string,
- `POOL_PATH` string,
- `TRIGGER_NAME` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"WM_POOL\".\"PATH\" AS POOL_PATH,
- \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
-FROM \"WM_POOL_TO_TRIGGER\"
- JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\"
- JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\"
- JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-UNION
-SELECT
- \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- '' AS POOL_PATH,
- \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
-FROM \"WM_TRIGGER\"
- JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't')
-"
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
- `RP_NAME` string,
- `NS` string,
- `POOL_PATH` string,
- `TRIGGER_NAME` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"WM_POOL\".\"PATH\" AS POOL_PATH,
- \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
-FROM \"WM_POOL_TO_TRIGGER\"
- JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\"
- JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\"
- JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-UNION
-SELECT
- \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- '' AS POOL_PATH,
- \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
-FROM \"WM_TRIGGER\"
- JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't')
-"
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` (
- `RP_NAME` string,
- `NS` string,
- `ENTITY_TYPE` string,
- `ENTITY_NAME` string,
- `POOL_PATH` string,
- `ORDERING` int
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"ENTITY_TYPE\",
- \"ENTITY_NAME\",
- case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end,
- \"ORDERING\"
-FROM \"WM_MAPPING\"
-JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\"
-"
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@WM_MAPPINGS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` (
- `RP_NAME` string,
- `NS` string,
- `ENTITY_TYPE` string,
- `ENTITY_NAME` string,
- `POOL_PATH` string,
- `ORDERING` int
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"WM_RESOURCEPLAN\".\"NAME\",
- case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
- \"ENTITY_TYPE\",
- \"ENTITY_NAME\",
- case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end,
- \"ORDERING\"
-FROM \"WM_MAPPING\"
-JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
-LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\"
-"
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@WM_MAPPINGS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
- `CQ_ID` bigint,
- `CQ_DATABASE` string,
- `CQ_TABLE` string,
- `CQ_PARTITION` string,
- `CQ_STATE` string,
- `CQ_TYPE` string,
- `CQ_TBLPROPERTIES` string,
- `CQ_WORKER_ID` string,
- `CQ_START` bigint,
- `CQ_RUN_AS` string,
- `CQ_HIGHEST_WRITE_ID` bigint,
- `CQ_HADOOP_JOB_ID` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"COMPACTION_QUEUE\".\"CQ_ID\",
- \"COMPACTION_QUEUE\".\"CQ_DATABASE\",
- \"COMPACTION_QUEUE\".\"CQ_TABLE\",
- \"COMPACTION_QUEUE\".\"CQ_PARTITION\",
- \"COMPACTION_QUEUE\".\"CQ_STATE\",
- \"COMPACTION_QUEUE\".\"CQ_TYPE\",
- \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\",
- \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\",
- \"COMPACTION_QUEUE\".\"CQ_START\",
- \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
- \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
- \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\"
-FROM \"COMPACTION_QUEUE\"
-"
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@COMPACTION_QUEUE
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
- `CQ_ID` bigint,
- `CQ_DATABASE` string,
- `CQ_TABLE` string,
- `CQ_PARTITION` string,
- `CQ_STATE` string,
- `CQ_TYPE` string,
- `CQ_TBLPROPERTIES` string,
- `CQ_WORKER_ID` string,
- `CQ_START` bigint,
- `CQ_RUN_AS` string,
- `CQ_HIGHEST_WRITE_ID` bigint,
- `CQ_HADOOP_JOB_ID` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"COMPACTION_QUEUE\".\"CQ_ID\",
- \"COMPACTION_QUEUE\".\"CQ_DATABASE\",
- \"COMPACTION_QUEUE\".\"CQ_TABLE\",
- \"COMPACTION_QUEUE\".\"CQ_PARTITION\",
- \"COMPACTION_QUEUE\".\"CQ_STATE\",
- \"COMPACTION_QUEUE\".\"CQ_TYPE\",
- \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\",
- \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\",
- \"COMPACTION_QUEUE\".\"CQ_START\",
- \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
- \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
- \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\"
-FROM \"COMPACTION_QUEUE\"
-"
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@COMPACTION_QUEUE
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
- `CC_ID` bigint,
- `CC_DATABASE` string,
- `CC_TABLE` string,
- `CC_PARTITION` string,
- `CC_STATE` string,
- `CC_TYPE` string,
- `CC_TBLPROPERTIES` string,
- `CC_WORKER_ID` string,
- `CC_START` bigint,
- `CC_END` bigint,
- `CC_RUN_AS` string,
- `CC_HIGHEST_WRITE_ID` bigint,
- `CC_HADOOP_JOB_ID` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"COMPLETED_COMPACTIONS\".\"CC_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TABLE\",
- \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\",
- \"COMPLETED_COMPACTIONS\".\"CC_STATE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TYPE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\",
- \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_START\",
- \"COMPLETED_COMPACTIONS\".\"CC_END\",
- \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
- \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\"
-FROM \"COMPLETED_COMPACTIONS\"
-"
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: SYS@COMPLETED_COMPACTIONS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
- `CC_ID` bigint,
- `CC_DATABASE` string,
- `CC_TABLE` string,
- `CC_PARTITION` string,
- `CC_STATE` string,
- `CC_TYPE` string,
- `CC_TBLPROPERTIES` string,
- `CC_WORKER_ID` string,
- `CC_START` bigint,
- `CC_END` bigint,
- `CC_RUN_AS` string,
- `CC_HIGHEST_WRITE_ID` bigint,
- `CC_HADOOP_JOB_ID` string
-)
-STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
-TBLPROPERTIES (
-"hive.sql.database.type" = "METASTORE",
-"hive.sql.query" =
-"SELECT
- \"COMPLETED_COMPACTIONS\".\"CC_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TABLE\",
- \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\",
- \"COMPLETED_COMPACTIONS\".\"CC_STATE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TYPE\",
- \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\",
- \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_START\",
- \"COMPLETED_COMPACTIONS\".\"CC_END\",
- \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
- \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
- \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\"
-FROM \"COMPLETED_COMPACTIONS\"
-"
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: SYS@COMPLETED_COMPACTIONS
-POSTHOOK: Output: database:sys
-PREHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS`
-(
- `C_ID`,
- `C_CATALOG`,
- `C_DATABASE`,
- `C_TABLE`,
- `C_PARTITION`,
- `C_TYPE`,
- `C_STATE`,
- `C_HOSTNAME`,
- `C_WORKER_ID`,
- `C_START`,
- `C_DURATION`,
- `C_HADOOP_JOB_ID`,
- `C_RUN_AS`,
- `C_HIGHEST_WRITE_ID`
-) AS
-SELECT
- CC_ID,
- 'default',
- CC_DATABASE,
- CC_TABLE,
- CC_PARTITION,
- CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
- CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' WHEN CC_STATE = 'a' THEN 'attempted' ELSE 'UNKNOWN' END,
- CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END,
- CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[1] END,
- CC_START,
- CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END,
- CC_HADOOP_JOB_ID,
- CC_RUN_AS,
- CC_HIGHEST_WRITE_ID
-FROM COMPLETED_COMPACTIONS
-UNION ALL
-SELECT
- CQ_ID,
- 'default',
- CQ_DATABASE,
- CQ_TABLE,
- CQ_PARTITION,
- CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
- CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END,
- CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END,
- CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[1] END,
- CQ_START,
- cast (null as string),
- CQ_HADOOP_JOB_ID,
- CQ_RUN_AS,
- CQ_HIGHEST_WRITE_ID
-FROM COMPACTION_QUEUE
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@compaction_queue
-PREHOOK: Input: sys@completed_compactions
-PREHOOK: Output: SYS@COMPACTIONS
-PREHOOK: Output: database:sys
-POSTHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS`
-(
- `C_ID`,
- `C_CATALOG`,
- `C_DATABASE`,
- `C_TABLE`,
- `C_PARTITION`,
- `C_TYPE`,
- `C_STATE`,
- `C_HOSTNAME`,
- `C_WORKER_ID`,
- `C_START`,
- `C_DURATION`,
- `C_HADOOP_JOB_ID`,
- `C_RUN_AS`,
- `C_HIGHEST_WRITE_ID`
-) AS
-SELECT
- CC_ID,
- 'default',
- CC_DATABASE,
- CC_TABLE,
- CC_PARTITION,
- CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
- CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' WHEN CC_STATE = 'a' THEN 'attempted' ELSE 'UNKNOWN' END,
- CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END,
- CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[1] END,
- CC_START,
- CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END,
- CC_HADOOP_JOB_ID,
- CC_RUN_AS,
- CC_HIGHEST_WRITE_ID
-FROM COMPLETED_COMPACTIONS
-UNION ALL
-SELECT
- CQ_ID,
- 'default',
- CQ_DATABASE,
- CQ_TABLE,
- CQ_PARTITION,
- CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END,
- CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END,
- CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END,
- CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[1] END,
- CQ_START,
- cast (null as string),
- CQ_HADOOP_JOB_ID,
- CQ_RUN_AS,
- CQ_HIGHEST_WRITE_ID
-FROM COMPACTION_QUEUE
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@compaction_queue
-POSTHOOK: Input: sys@completed_compactions
-POSTHOOK: Output: SYS@COMPACTIONS
-POSTHOOK: Output: database:sys
-POSTHOOK: Lineage: COMPACTIONS.c_catalog EXPRESSION []
-POSTHOOK: Lineage: COMPACTIONS.c_database EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_database, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_database, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_duration EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_end, type:bigint, comment:from deserializer), (completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), ]
-#### A masked pattern was here ####
-POSTHOOK: Lineage: COMPACTIONS.c_highest_write_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_highest_write_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_highest_write_id, type:bigint, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_hostname EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_id, type:bigint, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_partition EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_partition, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_partition, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_run_as EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_run_as, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_run_as, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_start EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_start, type:bigint, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_state EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_state, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_state, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_table EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_table, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_table, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_type EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_type, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_type, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: COMPACTIONS.c_worker_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ]
-PREHOOK: query: CREATE DATABASE IF NOT EXISTS INFORMATION_SCHEMA
-PREHOOK: type: CREATEDATABASE
-PREHOOK: Output: database:INFORMATION_SCHEMA
-POSTHOOK: query: CREATE DATABASE IF NOT EXISTS INFORMATION_SCHEMA
-POSTHOOK: type: CREATEDATABASE
-POSTHOOK: Output: database:INFORMATION_SCHEMA
-PREHOOK: query: USE INFORMATION_SCHEMA
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:information_schema
-POSTHOOK: query: USE INFORMATION_SCHEMA
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:information_schema
-PREHOOK: query: CREATE OR REPLACE VIEW `SCHEMATA`
-(
- `CATALOG_NAME`,
- `SCHEMA_NAME`,
- `SCHEMA_OWNER`,
- `DEFAULT_CHARACTER_SET_CATALOG`,
- `DEFAULT_CHARACTER_SET_SCHEMA`,
- `DEFAULT_CHARACTER_SET_NAME`,
- `SQL_PATH`
-) AS
-SELECT DISTINCT
- 'default',
- D.`NAME`,
- D.`OWNER_NAME`,
- cast(null as string),
- cast(null as string),
- cast(null as string),
- `DB_LOCATION_URI`
-FROM
- `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL
- AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'))
- AND current_authorizer() = P.`AUTHORIZER`
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@dbs
-PREHOOK: Input: sys@tbl_privs
-PREHOOK: Input: sys@tbls
-PREHOOK: Output: INFORMATION_SCHEMA@SCHEMATA
-PREHOOK: Output: database:information_schema
-POSTHOOK: query: CREATE OR REPLACE VIEW `SCHEMATA`
-(
- `CATALOG_NAME`,
- `SCHEMA_NAME`,
- `SCHEMA_OWNER`,
- `DEFAULT_CHARACTER_SET_CATALOG`,
- `DEFAULT_CHARACTER_SET_SCHEMA`,
- `DEFAULT_CHARACTER_SET_NAME`,
- `SQL_PATH`
-) AS
-SELECT DISTINCT
- 'default',
- D.`NAME`,
- D.`OWNER_NAME`,
- cast(null as string),
- cast(null as string),
- cast(null as string),
- `DB_LOCATION_URI`
-FROM
- `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL
- AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'))
- AND current_authorizer() = P.`AUTHORIZER`
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@dbs
-POSTHOOK: Input: sys@tbl_privs
-POSTHOOK: Input: sys@tbls
-POSTHOOK: Output: INFORMATION_SCHEMA@SCHEMATA
-POSTHOOK: Output: database:information_schema
-POSTHOOK: Lineage: SCHEMATA.catalog_name SIMPLE []
-POSTHOOK: Lineage: SCHEMATA.default_character_set_catalog EXPRESSION []
-POSTHOOK: Lineage: SCHEMATA.default_character_set_name EXPRESSION []
-POSTHOOK: Lineage: SCHEMATA.default_character_set_schema EXPRESSION []
-POSTHOOK: Lineage: SCHEMATA.schema_name SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ]
-#### A masked pattern was here ####
-POSTHOOK: Lineage: SCHEMATA.sql_path SIMPLE [(dbs)d.FieldSchema(name:db_location_uri, type:string, comment:from deserializer), ]
-PREHOOK: query: CREATE OR REPLACE VIEW `TABLES`
-(
- `TABLE_CATALOG`,
- `TABLE_SCHEMA`,
- `TABLE_NAME`,
- `TABLE_TYPE`,
- `SELF_REFERENCING_COLUMN_NAME`,
- `REFERENCE_GENERATION`,
- `USER_DEFINED_TYPE_CATALOG`,
- `USER_DEFINED_TYPE_SCHEMA`,
- `USER_DEFINED_TYPE_NAME`,
- `IS_INSERTABLE_INTO`,
- `IS_TYPED`,
- `COMMIT_ACTION`
-) AS
-SELECT DISTINCT
- 'default',
- D.NAME,
- T.TBL_NAME,
- IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'),
- 'NO',
- cast(null as string)
-FROM
- `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL
- AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'))
- AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@dbs
-PREHOOK: Input: sys@tbl_privs
-PREHOOK: Input: sys@tbls
-PREHOOK: Output: INFORMATION_SCHEMA@TABLES
-PREHOOK: Output: database:information_schema
-POSTHOOK: query: CREATE OR REPLACE VIEW `TABLES`
-(
- `TABLE_CATALOG`,
- `TABLE_SCHEMA`,
- `TABLE_NAME`,
- `TABLE_TYPE`,
- `SELF_REFERENCING_COLUMN_NAME`,
- `REFERENCE_GENERATION`,
- `USER_DEFINED_TYPE_CATALOG`,
- `USER_DEFINED_TYPE_SCHEMA`,
- `USER_DEFINED_TYPE_NAME`,
- `IS_INSERTABLE_INTO`,
- `IS_TYPED`,
- `COMMIT_ACTION`
-) AS
-SELECT DISTINCT
- 'default',
- D.NAME,
- T.TBL_NAME,
- IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- cast(null as string),
- IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'),
- 'NO',
- cast(null as string)
-FROM
- `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL
- AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'))
- AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@dbs
-POSTHOOK: Input: sys@tbl_privs
-POSTHOOK: Input: sys@tbls
-POSTHOOK: Output: INFORMATION_SCHEMA@TABLES
-POSTHOOK: Output: database:information_schema
-POSTHOOK: Lineage: TABLES.commit_action EXPRESSION []
-POSTHOOK: Lineage: TABLES.is_insertable_into EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLES.is_typed SIMPLE []
-POSTHOOK: Lineage: TABLES.reference_generation EXPRESSION []
-POSTHOOK: Lineage: TABLES.self_referencing_column_name EXPRESSION []
-POSTHOOK: Lineage: TABLES.table_catalog SIMPLE []
-POSTHOOK: Lineage: TABLES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLES.table_type EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLES.user_defined_type_catalog EXPRESSION []
-POSTHOOK: Lineage: TABLES.user_defined_type_name EXPRESSION []
-POSTHOOK: Lineage: TABLES.user_defined_type_schema EXPRESSION []
-PREHOOK: query: CREATE OR REPLACE VIEW `TABLE_PRIVILEGES`
-(
- `GRANTOR`,
- `GRANTEE`,
- `TABLE_CATALOG`,
- `TABLE_SCHEMA`,
- `TABLE_NAME`,
- `PRIVILEGE_TYPE`,
- `IS_GRANTABLE`,
- `WITH_HIERARCHY`
-) AS
-SELECT DISTINCT
- P.`GRANTOR`,
- P.`PRINCIPAL_NAME`,
- 'default',
- D.`NAME`,
- T.`TBL_NAME`,
- P.`TBL_PRIV`,
- IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'),
- 'NO'
-FROM
- `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`)
- JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR
- (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE`
- AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP'))
- AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer())
-PREHOOK: type: CREATEVIEW
-PREHOOK: Input: sys@dbs
-PREHOOK: Input: sys@tbl_privs
-PREHOOK: Input: sys@tbls
-PREHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES
-PREHOOK: Output: database:information_schema
-POSTHOOK: query: CREATE OR REPLACE VIEW `TABLE_PRIVILEGES`
-(
- `GRANTOR`,
- `GRANTEE`,
- `TABLE_CATALOG`,
- `TABLE_SCHEMA`,
- `TABLE_NAME`,
- `PRIVILEGE_TYPE`,
- `IS_GRANTABLE`,
- `WITH_HIERARCHY`
-) AS
-SELECT DISTINCT
- P.`GRANTOR`,
- P.`PRINCIPAL_NAME`,
- 'default',
- D.`NAME`,
- T.`TBL_NAME`,
- P.`TBL_PRIV`,
- IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'),
- 'NO'
-FROM
- `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`)
- JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`)
- LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`)
-WHERE
- NOT restrict_information_schema() OR
- (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE`
- AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER'
- OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP'))
- AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer())
-POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: sys@dbs
-POSTHOOK: Input: sys@tbl_privs
-POSTHOOK: Input: sys@tbls
-POSTHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES
-POSTHOOK: Output: database:information_schema
-POSTHOOK: Lineage: TABLE_PRIVILEGES.grantee SIMPLE [(tbl_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.grantor SIMPLE [(tbl_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.is_grantable EXPRESSION [(tbl_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.privilege_type SIMPLE [(tbl_privs)p.FieldSchema(name:tbl_priv, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.table_catalog SIMPLE []
-POSTHOOK: Lineage: TABLE_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ]
-POSTHOOK: Lineage: TABLE_PRIVILEGES.with_hierarchy SIMPLE []
-PREHOOK: query: CREATE OR REPLACE VIEW `COLUMNS`
-(
- `TABLE_CATALOG`,
- `TABLE_SCHEMA`,
- `TABLE_NAME`,
- `COLUMN_NAME`,
- `ORDINAL_POSITION`,
- `COLUMN_DEFAULT`,
- `IS_NULLABLE`,
- `DATA_TYPE`,
- `CHARACTER_MAXIMUM_LENGTH`,
- `CHARACTER_OCTET_LENGTH`,
- 
`NUMERIC_PRECISION`, - `NUMERIC_PRECISION_RADIX`, - `NUMERIC_SCALE`, - `DATETIME_PRECISION`, - `INTERVAL_TYPE`, - `INTERVAL_PRECISION`, - `CHARACTER_SET_CATALOG`, - `CHARACTER_SET_SCHEMA`, - `CHARACTER_SET_NAME`, - `COLLATION_CATALOG`, - `COLLATION_SCHEMA`, - `COLLATION_NAME`, - `UDT_CATALOG`, - `UDT_SCHEMA`, - `UDT_NAME`, - `SCOPE_CATALOG`, - `SCOPE_SCHEMA`, - `SCOPE_NAME`, - `MAXIMUM_CARDINALITY`, - `DTD_IDENTIFIER`, - `IS_SELF_REFERENCING`, - `IS_IDENTITY`, - `IDENTITY_GENERATION`, - `IDENTITY_START`, - `IDENTITY_INCREMENT`, - `IDENTITY_MAXIMUM`, - `IDENTITY_MINIMUM`, - `IDENTITY_CYCLE`, - `IS_GENERATED`, - `GENERATION_EXPRESSION`, - `IS_SYSTEM_TIME_PERIOD_START`, - `IS_SYSTEM_TIME_PERIOD_END`, - `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, - `IS_UPDATABLE`, - `DECLARED_DATA_TYPE`, - `DECLARED_NUMERIC_PRECISION`, - `DECLARED_NUMERIC_SCALE` -) AS -SELECT DISTINCT - 'default', - D.NAME, - T.TBL_NAME, - C.COLUMN_NAME, - C.INTEGER_IDX, - cast (null as string), - 'YES', - C.TYPE_NAME as TYPE_NAME, - CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 - WHEN lower(C.TYPE_NAME) = 'float' THEN 23 - WHEN lower(C.TYPE_NAME) = 'double' THEN 53 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'float' THEN 2 - WHEN lower(C.TYPE_NAME) = 'double' THEN 2 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 - WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 - ELSE null END, - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - C.CD_ID, - 'NO', - 'NO', - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - 'NEVER', - cast (null as string), - 'NO', - 'NO', - cast (null as string), - 'YES', - C.TYPE_NAME as DECLARED_DATA_TYPE, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 - 
WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 - WHEN lower(C.TYPE_NAME) = 'float' THEN 23 - WHEN lower(C.TYPE_NAME) = 'double' THEN 53 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'float' THEN 2 - WHEN lower(C.TYPE_NAME) = 'double' THEN 2 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 - ELSE null END -FROM - `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) - JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) - JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) - LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND C.`COLUMN_NAME` = P.`COLUMN_NAME` - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_COL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() -PREHOOK: type: CREATEVIEW -PREHOOK: Input: sys@columns_v2 -PREHOOK: Input: sys@dbs -PREHOOK: Input: sys@sds -PREHOOK: Input: sys@tbl_col_privs -PREHOOK: Input: sys@tbls -PREHOOK: Output: INFORMATION_SCHEMA@COLUMNS -PREHOOK: Output: database:information_schema -POSTHOOK: query: CREATE OR REPLACE VIEW `COLUMNS` -( - `TABLE_CATALOG`, - `TABLE_SCHEMA`, - `TABLE_NAME`, - `COLUMN_NAME`, - `ORDINAL_POSITION`, - `COLUMN_DEFAULT`, - `IS_NULLABLE`, - `DATA_TYPE`, - `CHARACTER_MAXIMUM_LENGTH`, - `CHARACTER_OCTET_LENGTH`, - `NUMERIC_PRECISION`, - `NUMERIC_PRECISION_RADIX`, - `NUMERIC_SCALE`, - `DATETIME_PRECISION`, - `INTERVAL_TYPE`, - `INTERVAL_PRECISION`, - `CHARACTER_SET_CATALOG`, - `CHARACTER_SET_SCHEMA`, - `CHARACTER_SET_NAME`, - `COLLATION_CATALOG`, - `COLLATION_SCHEMA`, - `COLLATION_NAME`, - `UDT_CATALOG`, - `UDT_SCHEMA`, - `UDT_NAME`, - `SCOPE_CATALOG`, - `SCOPE_SCHEMA`, - `SCOPE_NAME`, - `MAXIMUM_CARDINALITY`, - `DTD_IDENTIFIER`, - `IS_SELF_REFERENCING`, - `IS_IDENTITY`, - `IDENTITY_GENERATION`, - `IDENTITY_START`, - `IDENTITY_INCREMENT`, - `IDENTITY_MAXIMUM`, - `IDENTITY_MINIMUM`, - `IDENTITY_CYCLE`, - `IS_GENERATED`, - `GENERATION_EXPRESSION`, - `IS_SYSTEM_TIME_PERIOD_START`, - `IS_SYSTEM_TIME_PERIOD_END`, - `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, - `IS_UPDATABLE`, - `DECLARED_DATA_TYPE`, - `DECLARED_NUMERIC_PRECISION`, - `DECLARED_NUMERIC_SCALE` -) AS -SELECT DISTINCT - 'default', - D.NAME, - T.TBL_NAME, - C.COLUMN_NAME, - C.INTEGER_IDX, - cast (null as string), - 'YES', - C.TYPE_NAME as TYPE_NAME, - CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) - ELSE null END, - CASE WHEN 
lower(C.TYPE_NAME) = 'bigint' THEN 19 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 - WHEN lower(C.TYPE_NAME) = 'float' THEN 23 - WHEN lower(C.TYPE_NAME) = 'double' THEN 53 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'float' THEN 2 - WHEN lower(C.TYPE_NAME) = 'double' THEN 2 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 - WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 - ELSE null END, - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - C.CD_ID, - 'NO', - 'NO', - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - cast (null as string), - 'NEVER', - cast (null as string), - 'NO', - 'NO', - cast (null as string), - 'YES', - C.TYPE_NAME as DECLARED_DATA_TYPE, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 - WHEN lower(C.TYPE_NAME) = 'float' THEN 23 - WHEN lower(C.TYPE_NAME) = 'double' THEN 53 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) - ELSE null END, - CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'int' THEN 10 - WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 - WHEN lower(C.TYPE_NAME) = 'float' THEN 2 - WHEN lower(C.TYPE_NAME) = 'double' THEN 2 - WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 - WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 - ELSE null END -FROM - `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) - JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) - JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) - LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND C.`COLUMN_NAME` = P.`COLUMN_NAME` - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_COL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: sys@columns_v2 -POSTHOOK: Input: sys@dbs -POSTHOOK: Input: sys@sds 
-POSTHOOK: Input: sys@tbl_col_privs -POSTHOOK: Input: sys@tbls -POSTHOOK: Output: INFORMATION_SCHEMA@COLUMNS -POSTHOOK: Output: database:information_schema -POSTHOOK: Lineage: COLUMNS.character_maximum_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.character_octet_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.character_set_catalog EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.character_set_name EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.character_set_schema EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.collation_catalog EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.collation_name EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.collation_schema EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.column_default EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.column_name SIMPLE [(columns_v2)c.FieldSchema(name:column_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.datetime_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.declared_data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.declared_numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.declared_numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.dtd_identifier SIMPLE [(columns_v2)c.FieldSchema(name:cd_id, type:bigint, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.generation_expression EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_cycle EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_generation EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_increment EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_maximum EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_minimum EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.identity_start EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.interval_precision EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.interval_type EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.is_generated SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_identity SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_nullable SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_self_referencing SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_system_time_period_end SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_system_time_period_start SIMPLE [] -POSTHOOK: Lineage: COLUMNS.is_updatable SIMPLE [] -POSTHOOK: Lineage: COLUMNS.maximum_cardinality EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.numeric_precision_radix EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.ordinal_position SIMPLE [(columns_v2)c.FieldSchema(name:integer_idx, type:int, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.scope_catalog EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.scope_name EXPRESSION [] -POSTHOOK: Lineage: 
COLUMNS.scope_schema EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.system_time_period_timestamp_generation EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.table_catalog SIMPLE [] -POSTHOOK: Lineage: COLUMNS.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMNS.udt_catalog EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.udt_name EXPRESSION [] -POSTHOOK: Lineage: COLUMNS.udt_schema EXPRESSION [] -PREHOOK: query: CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` -( - `GRANTOR`, - `GRANTEE`, - `TABLE_CATALOG`, - `TABLE_SCHEMA`, - `TABLE_NAME`, - `COLUMN_NAME`, - `PRIVILEGE_TYPE`, - `IS_GRANTABLE` -) AS -SELECT DISTINCT - P.`GRANTOR`, - P.`PRINCIPAL_NAME`, - 'default', - D.`NAME`, - T.`TBL_NAME`, - P.`COLUMN_NAME`, - P.`TBL_COL_PRIV`, - IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') -FROM - `sys`.`TBL_COL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) - JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) - JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) - LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) -WHERE - NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL - AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` - AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) - AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer() -PREHOOK: type: CREATEVIEW -PREHOOK: Input: sys@dbs -PREHOOK: Input: sys@sds -PREHOOK: Input: sys@tbl_col_privs -PREHOOK: Input: sys@tbl_privs -PREHOOK: Input: sys@tbls -PREHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES -PREHOOK: Output: database:information_schema -POSTHOOK: query: CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` -( - `GRANTOR`, - `GRANTEE`, - `TABLE_CATALOG`, - `TABLE_SCHEMA`, - `TABLE_NAME`, - `COLUMN_NAME`, - `PRIVILEGE_TYPE`, - `IS_GRANTABLE` -) AS -SELECT DISTINCT - P.`GRANTOR`, - P.`PRINCIPAL_NAME`, - 'default', - D.`NAME`, - T.`TBL_NAME`, - P.`COLUMN_NAME`, - P.`TBL_COL_PRIV`, - IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') -FROM - `sys`.`TBL_COL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) - JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) - JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) - LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) -WHERE - NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL - AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` - AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) - AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer() -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: sys@dbs -POSTHOOK: Input: sys@sds -POSTHOOK: Input: sys@tbl_col_privs -POSTHOOK: Input: sys@tbl_privs -POSTHOOK: Input: sys@tbls -POSTHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES -POSTHOOK: Output: database:information_schema -POSTHOOK: Lineage: COLUMN_PRIVILEGES.column_name SIMPLE [(tbl_col_privs)p.FieldSchema(name:column_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantee SIMPLE 
[(tbl_col_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantor SIMPLE [(tbl_col_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.is_grantable EXPRESSION [(tbl_col_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.privilege_type SIMPLE [(tbl_col_privs)p.FieldSchema(name:tbl_col_priv, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_catalog SIMPLE [] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] -PREHOOK: query: CREATE OR REPLACE VIEW `VIEWS` -( - `TABLE_CATALOG`, - `TABLE_SCHEMA`, - `TABLE_NAME`, - `VIEW_DEFINITION`, - `CHECK_OPTION`, - `IS_UPDATABLE`, - `IS_INSERTABLE_INTO`, - `IS_TRIGGER_UPDATABLE`, - `IS_TRIGGER_DELETABLE`, - `IS_TRIGGER_INSERTABLE_INTO` -) AS -SELECT DISTINCT - 'default', - D.NAME, - T.TBL_NAME, - T.VIEW_ORIGINAL_TEXT, - CAST(NULL as string), - false, - false, - false, - false, - false -FROM - `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) - LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - length(T.VIEW_ORIGINAL_TEXT) > 0 - AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) -PREHOOK: type: CREATEVIEW -PREHOOK: Input: sys@dbs -PREHOOK: Input: sys@tbl_privs -PREHOOK: Input: sys@tbls -PREHOOK: Output: INFORMATION_SCHEMA@VIEWS -PREHOOK: Output: database:information_schema -POSTHOOK: query: CREATE OR REPLACE VIEW `VIEWS` -( - `TABLE_CATALOG`, - `TABLE_SCHEMA`, - `TABLE_NAME`, - `VIEW_DEFINITION`, - `CHECK_OPTION`, - `IS_UPDATABLE`, - `IS_INSERTABLE_INTO`, - `IS_TRIGGER_UPDATABLE`, - `IS_TRIGGER_DELETABLE`, - `IS_TRIGGER_INSERTABLE_INTO` -) AS -SELECT DISTINCT - 'default', - D.NAME, - T.TBL_NAME, - T.VIEW_ORIGINAL_TEXT, - CAST(NULL as string), - false, - false, - false, - false, - false -FROM - `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) - LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - length(T.VIEW_ORIGINAL_TEXT) > 0 - AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: sys@dbs -POSTHOOK: Input: sys@tbl_privs -POSTHOOK: Input: sys@tbls -POSTHOOK: Output: INFORMATION_SCHEMA@VIEWS -POSTHOOK: Output: database:information_schema -POSTHOOK: Lineage: VIEWS.check_option EXPRESSION [] -POSTHOOK: Lineage: VIEWS.is_insertable_into SIMPLE [] -POSTHOOK: Lineage: VIEWS.is_trigger_deletable SIMPLE [] -POSTHOOK: Lineage: VIEWS.is_trigger_insertable_into SIMPLE [] -POSTHOOK: Lineage: VIEWS.is_trigger_updatable SIMPLE [] -POSTHOOK: Lineage: VIEWS.is_updatable SIMPLE [] -POSTHOOK: Lineage: VIEWS.table_catalog SIMPLE [] -POSTHOOK: Lineage: VIEWS.table_name SIMPLE 
[(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: VIEWS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: VIEWS.view_definition SIMPLE [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] -PREHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` -( - `C_ID`, - `C_CATALOG`, - `C_DATABASE`, - `C_TABLE`, - `C_PARTITION`, - `C_TYPE`, - `C_STATE`, - `C_HOSTNAME`, - `C_WORKER_ID`, - `C_START`, - `C_DURATION`, - `C_HADOOP_JOB_ID`, - `C_RUN_AS`, - `C_HIGHEST_WRITE_ID` -) AS -SELECT DISTINCT - C_ID, - C_CATALOG, - C_DATABASE, - C_TABLE, - C_PARTITION, - C_TYPE, - C_STATE, - C_HOSTNAME, - C_WORKER_ID, - C_START, - C_DURATION, - C_HADOOP_JOB_ID, - C_RUN_AS, - C_HIGHEST_WRITE_ID -FROM - `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) - JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) - LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) -PREHOOK: type: CREATEVIEW -PREHOOK: Input: sys@compaction_queue -PREHOOK: Input: sys@compactions -PREHOOK: Input: sys@completed_compactions -PREHOOK: Input: sys@dbs -PREHOOK: Input: sys@tbl_privs -PREHOOK: Input: sys@tbls -PREHOOK: Output: INFORMATION_SCHEMA@COMPACTIONS -PREHOOK: Output: database:information_schema -POSTHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` -( - `C_ID`, - `C_CATALOG`, - `C_DATABASE`, - `C_TABLE`, - `C_PARTITION`, - `C_TYPE`, - `C_STATE`, - `C_HOSTNAME`, - `C_WORKER_ID`, - `C_START`, - `C_DURATION`, - `C_HADOOP_JOB_ID`, - `C_RUN_AS`, - `C_HIGHEST_WRITE_ID` -) AS -SELECT DISTINCT - C_ID, - C_CATALOG, - C_DATABASE, - C_TABLE, - C_PARTITION, - C_TYPE, - C_STATE, - C_HOSTNAME, - C_WORKER_ID, - C_START, - C_DURATION, - C_HADOOP_JOB_ID, - C_RUN_AS, - C_HIGHEST_WRITE_ID -FROM - `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) - JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) - LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) -WHERE - (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL - AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' - OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) - AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) -POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: sys@compaction_queue -POSTHOOK: Input: sys@compactions -POSTHOOK: Input: sys@completed_compactions -POSTHOOK: Input: sys@dbs -POSTHOOK: Input: sys@tbl_privs -POSTHOOK: Input: sys@tbls -POSTHOOK: Output: INFORMATION_SCHEMA@COMPACTIONS -POSTHOOK: Output: database:information_schema -POSTHOOK: Lineage: COMPACTIONS.c_catalog EXPRESSION [] -POSTHOOK: Lineage: COMPACTIONS.c_database EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_database, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_database, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_duration EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_end, type:bigint, comment:from deserializer), 
(completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), ] -#### A masked pattern was here #### -POSTHOOK: Lineage: COMPACTIONS.c_highest_write_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_highest_write_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_highest_write_id, type:bigint, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_hostname EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_id, type:bigint, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_partition EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_partition, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_partition, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_run_as EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_run_as, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_run_as, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_start EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_start, type:bigint, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_state EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_state, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_state, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_table EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_table, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_table, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_type EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_type, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_type, type:string, comment:from deserializer), ] -POSTHOOK: Lineage: COMPACTIONS.c_worker_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] -PREHOOK: query: SHOW RESOURCE PLANS -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLANS -POSTHOOK: type: SHOW RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -PREHOOK: query: CREATE RESOURCE PLAN plan_1 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN 
plan_1 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: EXPLAIN SHOW RESOURCE PLANS -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN SHOW RESOURCE PLANS -POSTHOOK: type: SHOW RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - Stage-1 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-0 - Show Resource plans - - Stage: Stage-1 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SHOW RESOURCE PLANS -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLANS -POSTHOOK: type: SHOW RESOURCEPLAN -plan_1 DISABLED -PREHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 -POSTHOOK: type: SHOW RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - Stage-1 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-0 - Show Resource plans - resourcePlanName: plan_1 - - Stage: Stage-1 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SHOW RESOURCE PLAN plan_1 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN plan_1 -POSTHOOK: type: SHOW RESOURCEPLAN -plan_1[status=DISABLED,parallelism=null,defaultPool=default] - + default[allocFraction=1.0,schedulingPolicy=null,parallelism=4] - | mapped for default -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default DISABLED NULL default -PREHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 -POSTHOOK: type: CREATE RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create ResourcePlan - planName: plan_2 - queryParallelism: 5 - -PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 -POSTHOOK: type: ALTER RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_2 - Resource plan changed fields: - shouldValidate: false - -PREHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SHOW RESOURCE PLANS -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLANS -POSTHOOK: type: SHOW RESOURCEPLAN -plan_1 DISABLED -plan_2 DISABLED 10 -PREHOOK: query: SHOW RESOURCE PLAN plan_2 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN 
plan_2 -POSTHOOK: type: SHOW RESOURCEPLAN -plan_2[status=DISABLED,parallelism=10,defaultPool=default] - + default[allocFraction=1.0,schedulingPolicy=null,parallelism=5] - | mapped for default -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default DISABLED NULL default -plan_2 default DISABLED 10 default -PREHOOK: query: CREATE RESOURCE PLAN plan_2 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_2 already exists -PREHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2 -POSTHOOK: type: CREATE RESOURCEPLAN -FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_query_parallelism 5) (tok_default_pool all)) -PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: ) -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default DISABLED NULL default -plan_2 default DISABLED 10 default -PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3 -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 4 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 4 -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED 4 default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 UNSET QUERY_PARALLELISM -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 UNSET QUERY_PARALLELISM -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: 
sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 -POSTHOOK: type: ALTER RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false - -PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default1) -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false - -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 -POSTHOOK: type: ALTER RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Resource plans - Resource plan to modify: plan_3 - Resource plan changed fields: - shouldValidate: false - -PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) 
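-- Editor's note (illustrative sketch, not part of the recorded golden output):
-- the two failures above pin down the resource-plan lifecycle this file
-- exercises: a plan must be DISABLED before it can be edited or renamed, and
-- an ACTIVE plan cannot even be disabled until another plan is activated or
-- workload management is switched off. Every statement below appears verbatim
-- elsewhere in this test:
CREATE RESOURCE PLAN plan_1;                 -- new plans start out DISABLED
ALTER RESOURCE PLAN plan_1 RENAME TO plan_3; -- editing is allowed while DISABLED
ALTER RESOURCE PLAN plan_3 ENABLE;           -- DISABLED -> ENABLED
ALTER RESOURCE PLAN plan_3 ACTIVATE;         -- ENABLED  -> ACTIVE
DISABLE WORKLOAD MANAGEMENT;                 -- demotes the ACTIVE plan to ENABLED
ALTER RESOURCE PLAN plan_3 DISABLE;          -- ENABLED  -> DISABLED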
-PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command)) -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ENABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: 
sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: DISABLE WORKLOAD MANAGEMENT -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DISABLE WORKLOAD MANAGEMENT -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ENABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default DISABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ENABLED 10 default -plan_3 default ACTIVE NULL default -PREHOOK: query: ALTER 
RESOURCE PLAN plan_2 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ACTIVE 10 default -plan_3 default ENABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ACTIVE 10 default -plan_3 default ENABLED NULL default -PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ACTIVE 10 default -plan_3 default DISABLED NULL default -PREHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2 -POSTHOOK: type: DROP RESOURCEPLAN -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop Resource plans - resourcePlanName: plan_2 - -PREHOOK: query: DROP RESOURCE PLAN plan_2 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop an active resource plan) -PREHOOK: query: DROP RESOURCE PLAN plan_3 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP RESOURCE PLAN plan_3 -POSTHOOK: type: DROP RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ACTIVE 10 default -PREHOOK: query: DROP RESOURCE PLAN plan_99999 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Resource plan plan_99999 does not exist -PREHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999 -POSTHOOK: type: DROP RESOURCEPLAN -PREHOOK: query: CREATE RESOURCE PLAN `table` -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN `table` -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1 -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1 -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_2 default ACTIVE 10 default -table default DISABLED 1 default -PREHOOK: query: create table wm_test(key string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: INFORMATION_SCHEMA@wm_test -PREHOOK: Output: database:information_schema -POSTHOOK: query: create table wm_test(key string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: INFORMATION_SCHEMA@wm_test -POSTHOOK: Output: database:information_schema -PREHOOK: query: select key as 30min from wm_test -PREHOOK: type: QUERY -PREHOOK: Input: information_schema@wm_test -#### A masked pattern was here #### -POSTHOOK: query: select key as 30min from wm_test -POSTHOOK: type: QUERY -POSTHOOK: Input: information_schema@wm_test -#### A masked pattern was here #### -PREHOOK: query: select "10kb" as str from wm_test -PREHOOK: type: QUERY -PREHOOK: Input: information_schema@wm_test -#### A masked pattern was here #### -POSTHOOK: query: select "10kb" as str from wm_test -POSTHOOK: type: QUERY -POSTHOOK: Input: information_schema@wm_test -#### A masked pattern was here #### -PREHOOK: query: drop table wm_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: information_schema@wm_test -PREHOOK: Output: information_schema@wm_test -POSTHOOK: query: drop table wm_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: information_schema@wm_test -POSTHOOK: Output: information_schema@wm_test -PREHOOK: query: CREATE RESOURCE PLAN plan_1 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_1 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL -POSTHOOK: type: CREATE TRIGGER -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create WM Trigger - trigger: - -PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_1 BYTES_READ > '10kb' 
KILL -PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: ) -FAILED: ParseException line 4:60 mismatched input 'AND' expecting DO near ''30sec'' in create trigger statement -FAILED: ParseException line 2:63 mismatched input 'OR' expecting DO near ''30second'' in create trigger statement -FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator -FAILED: ParseException line 2:50 mismatched input '<' expecting > near 'ELAPSED_TIME' in comparisionOperator -FAILED: ParseException line 2:50 mismatched input '<=' expecting > near 'ELAPSED_TIME' in comparisionOperator -FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_TIME' in comparisionOperator -PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '10k' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid size unit k -PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '10 millis' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis -PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value. 
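The failures above pin down the trigger grammar: one condition per trigger (no AND/OR), '>' is the only accepted comparator, size and time literals must use known units, and counter limits must be positive. Only the first form below is accepted:

    CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool;  -- ok
    -- rejected: WHEN BYTES_READ > '10k'       (unknown size unit 'k')
    -- rejected: WHEN ELAPSED_TIME >= ...      (only '>' is parsed)
    -- rejected: WHEN BYTES_READ > '-1000'     (limit must be a positive long)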
-PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_1 BYTES_READ > '10kb' KILL -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -PREHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL -PREHOOK: type: ALTER TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL -POSTHOOK: type: ALTER TRIGGER -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter WM Trigger - trigger: - -PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL -PREHOOK: type: ALTER TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL -POSTHOOK: type: ALTER TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_1 BYTES_READ > '1GB' KILL -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -PREHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1 -PREHOOK: type: DROP TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1 -POSTHOOK: type: DROP TRIGGER -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop WM Trigger - resourcePlanName: plan_1 - triggerName: trigger_1 - -PREHOOK: query: DROP TRIGGER plan_1.trigger_1 -PREHOOK: type: DROP TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP TRIGGER plan_1.trigger_1 -POSTHOOK: type: DROP TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > '100mb' DO MOVE TO null_pool -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) 
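ALTER TRIGGER redefines the expression in place and DROP TRIGGER removes it, but both are rejected for a plan that is ENABLED or ACTIVE, as the failure against the ACTIVE plan_2 shows:

    ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL;  -- redefine in place
    DROP TRIGGER plan_1.trigger_1;
    -- against plan_2 (ACTIVE): InvalidOperationException:
    -- Resource plan must be disabled to edit it.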
-PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default` -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default` -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > '100MB' DO MOVE TO `default` -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > '100MB' DO MOVE TO `default` -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > "1GB" DO MOVE TO `default` -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > "1GB" DO MOVE TO `default` -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > '1hour' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > '1hour' DO KILL -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -table default database BYTES_WRITTEN > "1GB" MOVE TO default -table default table BYTES_WRITTEN > '100KB' MOVE TO default -table default trigger BYTES_WRITTEN > '100MB' MOVE TO default -table default trigger1 ELAPSED_TIME > 10 KILL -table default trigger2 ELAPSED_TIME > '1hour' KILL -PREHOOK: query: DROP TRIGGER `table`.`database` -PREHOOK: type: DROP TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP TRIGGER `table`.`database` -POSTHOOK: type: DROP TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -table default table BYTES_WRITTEN > '100KB' MOVE TO default -table default trigger BYTES_WRITTEN > '100MB' MOVE TO default -table default trigger1 ELAPSED_TIME > 10 KILL -table default trigger2 ELAPSED_TIME > '1hour' KILL -PREHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default ACTIVE 10 default -table default DISABLED 1 default -PREHOOK: query: DROP TRIGGER plan_1.trigger_2 -PREHOOK: type: DROP TRIGGER -PREHOOK: 
Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000gb" DO KILL -PREHOOK: type: ALTER TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ACTIVE NULL default -plan_2 default ENABLED 10 default -table default DISABLED 1 default -PREHOOK: query: DROP TRIGGER plan_1.trigger_2 -PREHOOK: type: DROP TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000KB" DO KILL -PREHOOK: type: ALTER TRIGGER -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -plan_2 default trigger_1 BYTES_READ > 0 MOVE TO null_pool -table default table BYTES_WRITTEN > '100KB' MOVE TO default -table default trigger BYTES_WRITTEN > '100MB' MOVE TO default -table default trigger1 ELAPSED_TIME > 10 KILL -table default trigger2 ELAPSED_TIME > '1hour' KILL -PREHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH - ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH - ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' -POSTHOOK: type: CREATE POOL -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create Pool - pool: - -PREHOOK: query: CREATE POOL plan_1.default WITH - ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from 
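Activating plan_1 implicitly returns the previously ACTIVE plan_2 to ENABLED, and trigger edits stay blocked until a plan is fully DISABLED:

    ALTER RESOURCE PLAN plan_1 ACTIVATE;   -- plan_2: ACTIVE -> ENABLED
    ALTER RESOURCE PLAN plan_2 DISABLE;    -- plan_2 becomes editable again
    CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool;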
org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) -FAILED: SemanticException alloc_fraction should be specified for a pool -FAILED: SemanticException query_parallelism should be specified for a pool -PREHOOK: query: CREATE POOL plan_2.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Pool already exists: ) -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 5 NULL -table default default 1.0 4 NULL -FAILED: SemanticException Invalid scheduling policy invalid -PREHOOK: query: CREATE POOL plan_2.default.c1 WITH - ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair' -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH - ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair' -POSTHOOK: type: CREATE POOL -PREHOOK: query: CREATE POOL plan_2.default.c2 WITH - QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH - QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75 -POSTHOOK: type: CREATE POOL -PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE -POSTHOOK: type: ALTER RESOURCEPLAN -Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default -PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
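Pools require ALLOC_FRACTION and QUERY_PARALLELISM (omitting either is a SemanticException), while SCHEDULING_POLICY is optional and validated. VALIDATE then checks cross-pool invariants, here that sibling fractions must not sum past 1 (0.3 + 0.75 = 1.05):

    CREATE POOL plan_2.default.c1 WITH
      ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair';
    ALTER RESOURCE PLAN plan_2 VALIDATE;   -- reports the 1.05 violation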
InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default]) -PREHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 -POSTHOOK: type: ALTER POOL -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Pool - pool: - poolPath: default.c2 - -PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair' -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair' -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 5 NULL -plan_2 default default.c1 0.3 3 fair -plan_2 default default.c2 0.7 1 fair -table default default 1.0 4 NULL -PREHOOK: query: ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 5 NULL -plan_2 default default.c1 0.3 3 fair -plan_2 default default.c2 0.7 1 NULL -table default default 1.0 4 NULL -PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE -POSTHOOK: type: ALTER RESOURCEPLAN -warn: Sum of all pools' query parallelism: 9 is less than resource plan query parallelism: 10 -PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: ALTER POOL plan_2.default SET path = def -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.default SET path = def -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was 
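Fixing c2's fraction to 0.7 brings the sum back to 1.0. ALTER POOL can set or clear individual properties, and VALIDATE's remaining complaint (total pool parallelism 9 below the plan's 10) is only a warning, so ENABLE ACTIVATE succeeds:

    ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1;
    ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY;
    ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE;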
here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -PREHOOK: query: EXPLAIN DROP POOL plan_2.default -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN DROP POOL plan_2.default -POSTHOOK: type: DROP POOL -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop WM Pool - poolName: plan_2 - -PREHOOK: query: DROP POOL plan_2.default -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot delete pool: default) -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH - QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist) -PREHOOK: query: CREATE POOL `table`.`table` WITH - SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL `table`.`table` WITH - SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1 -POSTHOOK: type: CREATE POOL -PREHOOK: query: CREATE POOL `table`.`table`.pool1 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL `table`.`table`.pool1 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9 -POSTHOOK: type: CREATE POOL -PREHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3 -POSTHOOK: type: CREATE POOL -PREHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH - SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7 -POSTHOOK: type: CREATE POOL -PREHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0 -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here 
#### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -table default table 0.0 1 fifo -table default table.pool1 0.9 3 fair -table default table.pool1.child1 0.3 1 fair -table default table.pool1.child2 0.7 3 fair -PREHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -table default table.pool.child1 0.3 1 fair -table default table.pool.child2 0.7 3 fair -PREHOOK: query: DROP POOL `table`.`table` -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools) -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -table default table.pool.child1 0.3 1 fair -table default table.pool.child2 0.7 3 fair -PREHOOK: query: DROP POOL `table`.default -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
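SET PATH renames a pool, and child paths follow the parent (table.pool1.child1 becomes table.pool.child1). Deletion is guarded: a pool path whose parent does not exist cannot be created, a pool with children cannot be dropped, and (next) neither can the plan's current default pool:

    ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool;  -- children follow
    DROP POOL `table`.`table`;   -- fails: pool still has child pools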
InvalidOperationException(message:Cannot drop default pool of a resource plan) -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default default 1.0 4 NULL -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -table default table.pool.child1 0.3 1 fair -table default table.pool.child2 0.7 3 fair -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ACTIVE NULL default -plan_2 default DISABLED 10 def -table default DISABLED 1 default -PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: DROP POOL `table`.default -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP POOL `table`.default -POSTHOOK: type: DROP POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default def 1.0 5 NULL -plan_2 default def.c1 0.3 3 fair -plan_2 default def.c2 0.7 1 NULL -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -table default table.pool.child1 0.3 1 fair -table default table.pool.child2 0.7 3 fair -PREHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ACTIVE NULL default -plan_2 default DISABLED 10 def -table default DISABLED 1 NULL -PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 -POSTHOOK: type: ALTER POOL -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create Trigger to pool mappings - resourcePlanName: plan_2 - Pool path: def.c1 - Trigger name: trigger_1 - -PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER 
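Re-pointing the plan's default pool makes the old default droppable, UNSET leaves defaultPool NULL, and ALTER POOL ... ADD TRIGGER starts wiring triggers to pools:

    ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool;
    DROP POOL `table`.default;          -- legal once it is no longer the default
    ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL;
    ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1;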
trigger_1 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table` -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table` -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table` -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table` -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1` -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1` -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO POOL `table`.pool.child2 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO POOL `table`.pool.child2 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2` -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2` -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO UNMANAGED -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO UNMANAGED -POSTHOOK: type: ALTER POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -plan_2 default def.c1 trigger_1 -plan_2 default def.c2 trigger_1 -table default trigger1 -table default table table -table default table.pool.child1 table -table default table.pool.child1 trigger1 -table default table.pool.child2 trigger1 -table default table.pool.child2 trigger2 -PREHOOK: query: SHOW RESOURCE PLAN `table` -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN `table` -POSTHOOK: type: SHOW RESOURCEPLAN -table[status=DISABLED,parallelism=1,defaultPool=null] - + table[allocFraction=0.0,schedulingPolicy=fifo,parallelism=1] - | trigger table: if (BYTES_WRITTEN > '100KB') { MOVE TO default } - + pool[allocFraction=0.9,schedulingPolicy=fair,parallelism=3] - + child2[allocFraction=0.7,schedulingPolicy=fair,parallelism=3] - | trigger trigger1: if (ELAPSED_TIME > 10) { KILL } - | trigger trigger2: if (ELAPSED_TIME > '1hour') { KILL } - + child1[allocFraction=0.3,schedulingPolicy=fair,parallelism=1] - | trigger table: if (BYTES_WRITTEN > '100KB') { MOVE TO default } - | trigger trigger1: if (ELAPSED_TIME > 10) { KILL } - + - | trigger trigger1: if (ELAPSED_TIME > 10) { KILL } - + - | trigger trigger: if (BYTES_WRITTEN > '100MB') { MOVE TO default } -PREHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM UNMANAGED -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM UNMANAGED -POSTHOOK: type: ALTER POOL 
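Pool-trigger associations can be written from either side, and a trigger can also be attached to the unmanaged queue (queries running outside any WM pool); SHOW RESOURCE PLAN then renders the resulting tree:

    ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`;
    ALTER TRIGGER `table`.`trigger1` ADD TO POOL `table`.pool.child2;  -- same link, other side
    ALTER TRIGGER `table`.`trigger1` ADD TO UNMANAGED;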
-PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -plan_2 default def.c1 trigger_1 -plan_2 default def.c2 trigger_1 -table default table table -table default table.pool.child1 table -table default table.pool.child1 trigger1 -table default table.pool.child2 trigger2 -PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default) -PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) -PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -plan_2 default def.c1 trigger_1 -plan_2 default def.c2 trigger_1 -table default table table -table default table.pool.child1 table -table default table.pool.child1 trigger1 -table default table.pool.child2 trigger2 -PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 -POSTHOOK: type: ALTER POOL -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop Trigger to pool mappings - resourcePlanName: plan_2 - Pool path: def.c1 - Trigger name: trigger_1 - -PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 -POSTHOOK: type: ALTER POOL -PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
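Removal mirrors addition, again from either side, and unknown pools or triggers surface as NoSuchObjectException rather than silent no-ops:

    ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2;
    ALTER TRIGGER `table`.`trigger1` DROP FROM UNMANAGED;
    ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1;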
NoSuchObjectException(message:Cannot find trigger with name: trigger_2) -PREHOOK: query: DROP POOL `table`.`table`.pool.child1 -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP POOL `table`.`table`.pool.child1 -POSTHOOK: type: DROP POOL -PREHOOK: query: DROP POOL `table`.`table`.pool.child2 -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP POOL `table`.`table`.pool.child2 -POSTHOOK: type: DROP POOL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -plan_2 default def.c2 trigger_1 -table default table table -PREHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def -POSTHOOK: type: CREATE MAPPING -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create Mapping - mapping: - -PREHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 -POSTHOOK: type: CREATE MAPPING -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Create Mapping - mapping: - -PREHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED -PREHOOK: type: ALTER MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED -POSTHOOK: type: ALTER MAPPING -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Alter Mapping - mapping: - -PREHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED -PREHOOK: type: ALTER MAPPING -PREHOOK: Output: 
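Mappings route incoming queries to pools by user, group, or application, with an optional ordering to break ties; UNMANAGED sends matching queries outside the WM pools:

    CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1;
    CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1;
    CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1;
    CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1;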
dummyHostnameForTest -POSTHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED -POSTHOOK: type: ALTER MAPPING -PREHOOK: query: SHOW RESOURCE PLAN plan_2 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN plan_2 -POSTHOOK: type: SHOW RESOURCEPLAN -plan_2[status=DISABLED,parallelism=10,defaultPool=def] - + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5] - | mapped for users: user2 - | mapped for default - + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1] - | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool } - | mapped for groups: group2 - + c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3] - | mapped for groups: group1 - | mapped for applications: app1 - + - | mapped for users: user1 - | mapped for groups: group3 -PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -plan_2 default APPLICATION app1 def.c1 0 -plan_2 default GROUP group1 def.c1 0 -plan_2 default GROUP group2 def.c2 1 -plan_2 default GROUP group3 1 -plan_2 default USER user1 0 -plan_2 default USER user2 def 1 -PREHOOK: query: DROP POOL plan_2.def.c1 -PREHOOK: type: DROP POOL -PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.) -PREHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2 -POSTHOOK: type: DROP MAPPING -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop mapping - mapping: - -PREHOOK: query: DROP USER MAPPING "user2" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP USER MAPPING "user2" in plan_2 -POSTHOOK: type: DROP MAPPING -PREHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2 -POSTHOOK: type: DROP MAPPING -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Drop mapping - mapping: - -PREHOOK: query: DROP GROUP MAPPING "group2" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP GROUP MAPPING "group2" in plan_2 -POSTHOOK: type: DROP MAPPING -PREHOOK: query: DROP GROUP MAPPING "group3" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP GROUP MAPPING "group3" in plan_2 -POSTHOOK: type: DROP MAPPING -PREHOOK: query: DROP APPLICATION MAPPING "app1" in plan_2 -PREHOOK: type: DROP MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP APPLICATION MAPPING "app1" in plan_2 -POSTHOOK: type: DROP MAPPING -PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -plan_2 default GROUP group1 def.c1 0 -plan_2 default USER user1 0 -PREHOOK: query: CREATE RESOURCE PLAN plan_4 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: 
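An existing mapping can be re-pointed with ALTER and removed with DROP; note that a pool cannot be dropped while mappings still reference it:

    ALTER USER MAPPING "user1" IN plan_2 UNMANAGED;
    DROP USER MAPPING "user2" IN plan_2;
    DROP GROUP MAPPING "group2" IN plan_2;
    -- DROP POOL plan_2.def.c1 fails first: "Please remove all mappings for this pool."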
dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_4 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SHOW RESOURCE PLAN plan_2 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN plan_2 -POSTHOOK: type: SHOW RESOURCEPLAN -plan_2[status=DISABLED,parallelism=10,defaultPool=def] - + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5] - | mapped for default - + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1] - | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool } - + c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3] - | mapped for groups: group1 - + - | mapped for users: user1 -PREHOOK: query: DROP RESOURCE PLAN plan_2 -PREHOOK: type: DROP RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: DROP RESOURCE PLAN plan_2 -POSTHOOK: type: DROP RESOURCEPLAN -PREHOOK: query: CREATE RESOURCE PLAN plan_2 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_2 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default DISABLED NULL default -plan_4 default ACTIVE NULL default -table default DISABLED 1 NULL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 4 NULL -plan_4 default default 1.0 4 NULL -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -table default table BYTES_WRITTEN > '100KB' MOVE TO default -table default trigger BYTES_WRITTEN > '100MB' MOVE TO default -table default trigger1 ELAPSED_TIME > 10 KILL -table default trigger2 ELAPSED_TIME > '1hour' KILL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -table default table table -PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -PREHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: 
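Dropping plan_2 and recreating it confirms that a plan's pools, triggers, pool-trigger links, and mappings are removed with it; the recreated plan carries only the stock default pool:

    DROP RESOURCE PLAN plan_2;     -- cascades to its pools, triggers, and mappings
    CREATE RESOURCE PLAN plan_2;   -- fresh plan, default pool only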
CREATE RESOURCE PLAN plan_4a LIKE plan_4 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: CREATE POOL plan_4a.pool1 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=2, ALLOC_FRACTION=0.0 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL plan_4a.pool1 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=2, ALLOC_FRACTION=0.0 -POSTHOOK: type: CREATE POOL -PREHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1 -PREHOOK: type: CREATE MAPPING -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1 -POSTHOOK: type: CREATE MAPPING -PREHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > '10GB' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > '10GB' DO KILL -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > '11GB' DO KILL -PREHOOK: type: CREATE TRIGGER -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > '11GB' DO KILL -POSTHOOK: type: CREATE TRIGGER -PREHOOK: query: ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2 -PREHOOK: type: ALTER POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2 -POSTHOOK: type: ALTER POOL -PREHOOK: query: CREATE RESOURCE PLAN plan_4b LIKE plan_4a -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_4b LIKE plan_4a -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: CREATE POOL plan_4b.pool2 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL plan_4b.pool2 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 -POSTHOOK: type: CREATE POOL -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default DISABLED NULL default -plan_4 default ACTIVE NULL default -plan_4a default DISABLED NULL default -plan_4b default DISABLED NULL default -table default DISABLED 1 NULL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 4 NULL -plan_4 default default 1.0 4 NULL -plan_4a default default 1.0 4 NULL -plan_4a default pool1 0.0 2 fair -plan_4b default default 1.0 4 NULL -plan_4b default pool1 0.0 2 fair -plan_4b default pool2 0.0 3 fair -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_triggers -#### A masked pattern was here #### -plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool -plan_4a default trigger_1 BYTES_READ > '10GB' KILL -plan_4a default trigger_2 BYTES_READ > '11GB' KILL -plan_4b 
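CREATE ... LIKE deep-copies a plan: the SYS listings that follow show pool1, both triggers, the pool-trigger link, and the user1 mapping all duplicated into plan_4b:

    CREATE RESOURCE PLAN plan_4b LIKE plan_4a;
    CREATE POOL plan_4b.pool2 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0;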
default trigger_1 BYTES_READ > '10GB' KILL -plan_4b default trigger_2 BYTES_READ > '11GB' KILL -table default table BYTES_WRITTEN > '100KB' MOVE TO default -table default trigger BYTES_WRITTEN > '100MB' MOVE TO default -table default trigger1 ELAPSED_TIME > 10 KILL -table default trigger2 ELAPSED_TIME > '1hour' KILL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools_to_triggers -#### A masked pattern was here #### -plan_4a default pool1 trigger_2 -plan_4b default pool1 trigger_2 -table default table table -PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_mappings -#### A masked pattern was here #### -plan_4a default USER user1 pool1 0 -plan_4b default USER user1 pool1 0 -PREHOOK: query: REPLACE RESOURCE PLAN plan_4a WITH plan_4b -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: REPLACE RESOURCE PLAN plan_4a WITH plan_4b -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default DISABLED NULL default -plan_4 default ACTIVE NULL default -plan_4a default DISABLED NULL default -plan_4a_old_0 default DISABLED NULL default -table default DISABLED 1 NULL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 4 NULL -plan_4 default default 1.0 4 NULL -plan_4a default default 1.0 4 NULL -plan_4a default pool1 0.0 2 fair -plan_4a default pool2 0.0 3 fair -plan_4a_old_0 default default 1.0 4 NULL -plan_4a_old_0 default pool1 0.0 2 fair -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair -PREHOOK: query: SHOW RESOURCE PLAN plan_4a_old_0 -PREHOOK: type: SHOW RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: SHOW RESOURCE PLAN plan_4a_old_0 -POSTHOOK: type: SHOW RESOURCEPLAN -plan_4a_old_0[status=DISABLED,parallelism=null,defaultPool=default] - + default[allocFraction=1.0,schedulingPolicy=null,parallelism=4] - | mapped for default - + pool1[allocFraction=0.0,schedulingPolicy=fair,parallelism=2] - | trigger trigger_2: if (BYTES_READ > '11GB') { KILL } - | mapped for users: user1 - + - | trigger trigger_1: if (BYTES_READ > '10GB') { KILL } -PREHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked 
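REPLACE moves the source plan's contents over the destination name and archives the previous destination under a generated *_old_N name, so plan_4a now carries pool2 while plan_4a_old_0 keeps the original definition:

    REPLACE RESOURCE PLAN plan_4a WITH plan_4b;   -- plan_4b -> plan_4a; old plan_4a -> plan_4a_old_0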
pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default DISABLED NULL default -plan_4 default ACTIVE NULL default -plan_4_old_0 default DISABLED NULL default -plan_4a_old_0 default DISABLED NULL default -table default DISABLED 1 NULL -PREHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4 -PREHOOK: type: CREATE RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4 -POSTHOOK: type: CREATE RESOURCEPLAN -PREHOOK: query: CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 -PREHOOK: type: CREATE POOL -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 -POSTHOOK: type: CREATE POOL -PREHOOK: query: ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE -PREHOOK: type: ALTER RESOURCEPLAN -PREHOOK: Output: dummyHostnameForTest -POSTHOOK: query: ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE -POSTHOOK: type: ALTER RESOURCEPLAN -PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_resourceplans -#### A masked pattern was here #### -plan_1 default ENABLED NULL default -plan_2 default DISABLED NULL default -plan_4 default ACTIVE NULL default -plan_4_old_0 default DISABLED NULL default -plan_4_old_1 default DISABLED NULL default -plan_4a_old_0 default DISABLED NULL default -table default DISABLED 1 NULL -PREHOOK: query: SELECT * FROM SYS.WM_POOLS -PREHOOK: type: QUERY -PREHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM SYS.WM_POOLS -POSTHOOK: type: QUERY -POSTHOOK: Input: sys@wm_pools -#### A masked pattern was here #### -plan_1 default default 1.0 4 NULL -plan_2 default default 1.0 4 NULL -plan_4 default default 1.0 4 NULL -plan_4 default pool1 0.0 2 fair -plan_4 default pool2 0.0 3 fair -plan_4 default pool3 0.0 3 fair -plan_4_old_0 default default 1.0 4 NULL -plan_4_old_1 default default 1.0 4 NULL -plan_4_old_1 default pool1 0.0 2 fair -plan_4_old_1 default pool2 0.0 3 fair -plan_4a_old_0 default default 1.0 4 NULL -plan_4a_old_0 default pool1 0.0 2 fair -table default table 0.0 1 fifo -table default table.pool 0.9 3 fair
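The same renaming applies to the ACTIVE plan, either explicitly or folded into activation; each replacement archives another *_old_N copy (plan_4_old_0, then plan_4_old_1), as the final listings show:

    REPLACE ACTIVE RESOURCE PLAN WITH plan_4a;
    ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE;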