diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java index a2f49b7503..98f1fbf540 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java @@ -36,9 +36,9 @@ boolean needLock = false; /** ReadEntitites that are passed to the hooks. */ - protected Set<ReadEntity> inputs; + private Set<ReadEntity> inputs; /** List of WriteEntities that are passed to the hooks. */ - protected Set<WriteEntity> outputs; + private Set<WriteEntity> outputs; public DDLWork2() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java index 6782b02d20..57f8b46818 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java @@ -37,7 +37,7 @@ public CreateRoleOperation(DDLOperationContext context, CreateRoleDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); authorizer.createRole(desc.getName(), null); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java index e8b55ecf4c..8f33bd31ed 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java @@ -37,7 +37,7 @@ public DropRoleOperation(DDLOperationContext context, DropRoleDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); authorizer.dropRole(desc.getName()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java index 633ac434e0..041987d53e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java @@ -43,15 +43,15 @@ public GrantOperation(DDLOperationContext context, GrantDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); //Convert to object types used by the authorization plugin interface List<HivePrincipal> hivePrincipals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), - RoleUtils.getAuthorizationTranslator(authorizer)); + PrivilegeUtils.getAuthorizationTranslator(authorizer)); List<HivePrivilege> hivePrivileges = AuthorizationUtils.getHivePrivileges(desc.getPrivileges(), - RoleUtils.getAuthorizationTranslator(authorizer)); + PrivilegeUtils.getAuthorizationTranslator(authorizer)); HivePrivilegeObject hivePrivilegeObject = - RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject()); + PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject()); HivePrincipal grantorPrincipal = new HivePrincipal(desc.getGrantor(), AuthorizationUtils.getHivePrincipalType(desc.getGrantorType())); diff --git
ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java index 19abe2794d..b956dc87b1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java @@ -42,10 +42,10 @@ public GrantRoleOperation(DDLOperationContext context, GrantRoleDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); List principals = - AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), RoleUtils.getAuthorizationTranslator(authorizer)); + AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), PrivilegeUtils.getAuthorizationTranslator(authorizer)); HivePrincipal grantorPrincipal = null; if (desc.getGrantor() != null) { grantorPrincipal = diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java similarity index 92% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java index cfbc4cf620..ad431454de 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java @@ -33,11 +33,11 @@ import org.apache.hadoop.hive.ql.session.SessionState; /** - * Common utilities for Role related ddl operations. + * Common utilities for Privilege related ddl operations. */ -final class RoleUtils { - private RoleUtils() { - throw new UnsupportedOperationException("RoleUtils should not be instantiated"); +final class PrivilegeUtils { + private PrivilegeUtils() { + throw new UnsupportedOperationException("PrivilegeUtils should not be instantiated"); } static HiveAuthorizer getSessionAuthorizer(HiveConf conf) { diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java index bf4e01a191..62d79651a0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java @@ -43,15 +43,15 @@ public RevokeOperation(DDLOperationContext context, RevokeDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); //Convert to object types used by the authorization plugin interface List hivePrincipals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), - RoleUtils.getAuthorizationTranslator(authorizer)); + PrivilegeUtils.getAuthorizationTranslator(authorizer)); List hivePrivileges = AuthorizationUtils.getHivePrivileges(desc.getPrivileges(), - RoleUtils.getAuthorizationTranslator(authorizer)); + PrivilegeUtils.getAuthorizationTranslator(authorizer)); HivePrivilegeObject hivePrivilegeObject = - RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject()); + PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject()); HivePrincipal grantorPrincipal = new HivePrincipal(null, null); authorizer.revokePrivileges(hivePrincipals, hivePrivileges, 
hivePrivilegeObject, grantorPrincipal, diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java index 0b3b27695d..b13dab544c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java @@ -42,10 +42,10 @@ public RevokeRoleOperation(DDLOperationContext context, RevokeRoleDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); List principals = - AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), RoleUtils.getAuthorizationTranslator(authorizer)); + AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), PrivilegeUtils.getAuthorizationTranslator(authorizer)); HivePrincipal grantorPrincipal = null; if (desc.getGrantor() != null) { grantorPrincipal = diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java index d119fe4a28..590e015630 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java @@ -37,7 +37,7 @@ public SetRoleOperation(DDLOperationContext context, SetRoleDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); authorizer.setCurrentRole(desc.getName()); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java index 9738ddbcc0..9e83ece15c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java @@ -40,9 +40,9 @@ public ShowCurrentRoleOperation(DDLOperationContext context, ShowCurrentRoleDesc @Override public int execute() throws HiveException, IOException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); List roleNames = authorizer.getCurrentRoleNames(); - RoleUtils.writeListToFileAfterSort(roleNames, desc.getResFile(), context); + PrivilegeUtils.writeListToFileAfterSort(roleNames, desc.getResFile(), context); return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java index 50b41800a1..0affff03a0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java @@ -48,11 +48,11 @@ public ShowGrantOperation(DDLOperationContext context, ShowGrantDesc desc) { @Override public int execute() throws HiveException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); try { List privInfos = authorizer.showPrivileges( - RoleUtils.getAuthorizationTranslator(authorizer).getHivePrincipal(desc.getPrincipalDesc()), - 
RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getHiveObj())); + PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrincipal(desc.getPrincipalDesc()), + PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getHiveObj())); boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST); DDLUtils.writeToFile(writeGrantInfo(privInfos, testMode), desc.getResFile(), context); } catch (IOException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java index 392142ba14..73f1030d76 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java @@ -44,7 +44,7 @@ public ShowPrincipalsOperation(DDLOperationContext context, ShowPrincipalsDesc d @Override public int execute() throws HiveException, IOException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST); List roleGrants = authorizer.getPrincipalGrantInfoForRole(desc.getName()); DDLUtils.writeToFile(writeHiveRoleGrantInfo(roleGrants, testMode), desc.getResFile(), context); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java index 178ea8e3bc..e3e4e1409c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java @@ -45,7 +45,7 @@ public ShowRoleGrantOperation(DDLOperationContext context, ShowRoleGrantDesc des @Override public int execute() throws HiveException, IOException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST); List roles = authorizer.getRoleGrantInfoForPrincipal( AuthorizationUtils.getHivePrincipal(desc.getName(), desc.getPrincipalType())); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java index 22ca7f350d..0597eaffe2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java @@ -40,9 +40,9 @@ public ShowRolesOperation(DDLOperationContext context, ShowRolesDesc desc) { @Override public int execute() throws HiveException, IOException { - HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf()); + HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf()); List allRoles = authorizer.getAllRoles(); - RoleUtils.writeListToFileAfterSort(allRoles, desc.getResFile(), context); + PrivilegeUtils.writeListToFileAfterSort(allRoles, desc.getResFile(), context); return 0; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java new file mode 100644 index 0000000000..0479e0ad56 --- /dev/null +++ 
ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER POOL ... ADD TRIGGER commands. + */ +@Explain(displayName = "Create Trigger to pool mappings", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterPoolAddTriggerDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 383046258694558029L; + + static { + DDLTask2.registerOperation(AlterPoolAddTriggerDesc.class, AlterPoolAddTriggerOperation.class); + } + + private final String planName; + private final String triggerName; + private final String poolPath; + private final boolean isUnmanagedPool; + + public AlterPoolAddTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) { + this.planName = planName; + this.triggerName = triggerName; + this.poolPath = poolPath; + this.isUnmanagedPool = isUnmanagedPool; + } + + @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; + } + + @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerName() { + return triggerName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPathForExplain() { + return isUnmanagedPool ? "" : poolPath; + } + + public String getPoolPath() { + return poolPath; + } + + public boolean isUnmanagedPool() { + return isUnmanagedPool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java new file mode 100644 index 0000000000..e6bee58205 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of adding a trigger to a pool. + */ +public class AlterPoolAddTriggerOperation extends DDLOperation { + private final AlterPoolAddTriggerDesc desc; + + public AlterPoolAddTriggerOperation(DDLOperationContext context, AlterPoolAddTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + if (!desc.isUnmanagedPool()) { + context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(), + false); + } else { + assert desc.getPoolPath() == null; + WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName()); + // We are adding the trigger to the unmanaged pool, so set the unmanaged flag + trigger.setIsInUnmanaged(true); + context.getDb().alterWMTrigger(trigger); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java new file mode 100644 index 0000000000..ecd800fec3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER POOL ... DROP TRIGGER commands.
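Review note: every new Desc class in this patch self-registers its Operation through a static initializer, DDLTask2.registerOperation(SomeDesc.class, SomeOperation.class). A minimal sketch of the kind of class-to-class registry this implies; the names below are invented for illustration and are not the actual DDLTask2 code:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Toy stand-in for the registry behind DDLTask2.registerOperation(...).
final class OperationRegistrySketch {
  private static final Map<Class<?>, Class<?>> DESC_TO_OPERATION = new ConcurrentHashMap<>();

  static void registerOperation(Class<?> descClass, Class<?> operationClass) {
    DESC_TO_OPERATION.put(descClass, operationClass);
  }

  static Class<?> operationFor(Class<?> descClass) {
    return DESC_TO_OPERATION.get(descClass);
  }
}

One caveat of static-initializer registration is that a Desc class must be loaded before its operation can be looked up; presumably that holds here, since the descriptor instance itself is what reaches the task layer.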
+ */ +@Explain(displayName = "Drop Trigger to pool mappings", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterPoolDropTriggerDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 383046258694558029L; + + static { + DDLTask2.registerOperation(AlterPoolDropTriggerDesc.class, AlterPoolDropTriggerOperation.class); + } + + private final String planName; + private final String triggerName; + private final String poolPath; + private final boolean isUnmanagedPool; + + public AlterPoolDropTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) { + this.planName = planName; + this.triggerName = triggerName; + this.poolPath = poolPath; + this.isUnmanagedPool = isUnmanagedPool; + } + + @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; + } + + @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerName() { + return triggerName; + } + + @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPathForExplain() { + return isUnmanagedPool ? "" : poolPath; + } + + public String getPoolPath() { + return poolPath; + } + + public boolean isUnmanagedPool() { + return isUnmanagedPool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java new file mode 100644 index 0000000000..fc894051b3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a trigger to pool mapping. 
+ */ +public class AlterPoolDropTriggerOperation extends DDLOperation { + private final AlterPoolDropTriggerDesc desc; + + public AlterPoolDropTriggerOperation(DDLOperationContext context, AlterPoolDropTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + if (!desc.isUnmanagedPool()) { + context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(), + true); + } else { + assert desc.getPoolPath() == null; + WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName()); + // If we are dropping from unmanaged, unset the flag; and vice versa + trigger.setIsInUnmanaged(false); + context.getDb().alterWMTrigger(trigger); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java index dd2eaced23..4e7a699f5b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java @@ -16,101 +16,77 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Alter Resource plans", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterResourcePlanDesc extends DDLDesc implements Serializable { +/** + * DDL task description for ALTER RESOURCE PLAN commands. 
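Review note: besides the package move, each rename in this patch converts a mutable bean-style descriptor into an immutable value object, as the AlterResourcePlanDesc hunk above shows. The same before/after shape, reduced to one illustrative field:

import java.io.Serializable;

// Before: non-final field, setter, and a no-arg constructor kept around.
class MutableDescBefore implements Serializable {
  private String planName;
  public MutableDescBefore() {}
  public String getPlanName() { return planName; }
  public void setPlanName(String planName) { this.planName = planName; }
}

// After: final field assigned once; no setters left to mutate plan state.
class ImmutableDescAfter implements Serializable {
  private final String planName;
  ImmutableDescAfter(String planName) { this.planName = planName; }
  public String getPlanName() { return planName; }
}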
+ */ +@Explain(displayName = "Alter Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -3514685833183437279L; - private WMNullableResourcePlan resourcePlan; - private String rpName; - private boolean validate; - private boolean isEnableActivate, isForceDeactivate, isReplace; - private String resFile; + static { + DDLTask2.registerOperation(AlterResourcePlanDesc.class, AlterResourcePlanOperation.class); + } + + public static final String SCHEMA = "error#string"; - public AlterResourcePlanDesc() {} + private final WMNullableResourcePlan resourcePlan; + private final String planName; + private final boolean validate; + private final boolean isEnableActivate; + private final boolean isForceDeactivate; + private final boolean isReplace; + private final String resFile; - public AlterResourcePlanDesc(WMNullableResourcePlan resourcePlan, String rpName, boolean validate, - boolean isEnableActivate, boolean isForceDeactivate, boolean isReplace) { + public AlterResourcePlanDesc(WMNullableResourcePlan resourcePlan, String planName, boolean validate, + boolean isEnableActivate, boolean isForceDeactivate, boolean isReplace, String resFile) { this.resourcePlan = resourcePlan; - this.rpName = rpName; + this.planName = planName; this.validate = validate; this.isEnableActivate = isEnableActivate; this.isForceDeactivate = isForceDeactivate; this.isReplace = isReplace; + this.resFile = resFile; } - @Explain(displayName="Resource plan changed fields", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="Resource plan changed fields", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public WMNullableResourcePlan getResourcePlan() { return resourcePlan; } - public void setResourcePlan(WMNullableResourcePlan resourcePlan) { - this.resourcePlan = resourcePlan; - } - - @Explain(displayName="Resource plan to modify", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getResourcePlanName() { - return rpName; - } - - public void setResourcePlanName(String rpName) { - this.rpName = rpName; + @Explain(displayName="Resource plan to modify", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; } - @Explain(displayName="shouldValidate", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="shouldValidate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public boolean shouldValidate() { return validate; } - public void setValidate(boolean validate) { - this.validate = validate; - } - public boolean isEnableActivate() { return isEnableActivate; } - public void setIsEnableActivate(boolean b) { - this.isEnableActivate = b; - } - public boolean isForceDeactivate() { return isForceDeactivate; } - public void setIsForceDeactivate(boolean b) { - this.isForceDeactivate = b; - } - public boolean isReplace() { return isReplace; } - public void setIsReplace(boolean b) { - this.isReplace = b; - } - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - - public void setResFile(String resFile) { - this.resFile = resFile; - } - - public static String getSchema() { - return "error#string"; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java 
ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java new file mode 100644 index 0000000000..89fa0a3032 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; +import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.Collection; +import java.util.concurrent.ExecutionException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +import com.google.common.util.concurrent.ListenableFuture; + +/** + * Operation process of altering a resource plan. + */ +public class AlterResourcePlanOperation extends DDLOperation { + private final AlterResourcePlanDesc desc; + + // Note: the resource plan operations are going to be annotated with namespace based on the config + // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see + // that there exist other namespaces, because one HS2 always operates inside just one and we + // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about + // namespaces - Hive.java will transparently scope everything. That's the idea anyway. 
+ public AlterResourcePlanOperation(DDLOperationContext context, AlterResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + if (desc.shouldValidate()) { + WMValidateResourcePlanResponse result = context.getDb().validateResourcePlan(desc.getPlanName()); + try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + context.getFormatter().showErrors(out, result); + } catch (IOException e) { + throw new HiveException(e); + } + return 0; + } + + WMNullableResourcePlan resourcePlan = desc.getResourcePlan(); + WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getPlanName(), resourcePlan, + desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace()); + + boolean isActivate = resourcePlan.getStatus() != null && resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE; + boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate(); + if (!mustHaveAppliedChange && !desc.isReplace()) { + return 0; // The modification cannot affect an active plan. + } + if (appliedResourcePlan == null && !mustHaveAppliedChange) { + return 0; // Replacing an inactive plan. + } + + WorkloadManager wm = WorkloadManager.getInstance(); + boolean isInTest = HiveConf.getBoolVar(context.getConf(), ConfVars.HIVE_IN_TEST); + if (wm == null && isInTest) { + return 0; // Skip for tests if WM is not present. + } + + if ((appliedResourcePlan == null) != desc.isForceDeactivate()) { + throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable"); + // TODO: shut down HS2? + } + assert appliedResourcePlan == null || appliedResourcePlan.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE; + + handleWorkloadManagementServiceChange(wm, isActivate, appliedResourcePlan); + + return 0; + } + + private int handleWorkloadManagementServiceChange(WorkloadManager wm, boolean isActivate, + WMFullResourcePlan appliedResourcePlan) throws HiveException { + String name = null; + if (isActivate) { + name = appliedResourcePlan.getPlan().getName(); + LOG.info("Activating a new resource plan " + name + ": " + appliedResourcePlan); + } else { + LOG.info("Disabling workload management"); + } + + if (wm != null) { + // Note: as per our current constraints, the behavior of two parallel activates is + // undefined; although only one will succeed and the other will receive exception. + // We need proper (semi-)transactional modifications to support this without hacks. + ListenableFuture future = wm.updateResourcePlanAsync(appliedResourcePlan); + boolean isOk = false; + try { + // Note: we may add an async option in future. For now, let the task fail for the user. + future.get(); + isOk = true; + if (isActivate) { + LOG.info("Successfully activated resource plan " + name); + } else { + LOG.info("Successfully disabled workload management"); + } + } catch (InterruptedException | ExecutionException e) { + throw new HiveException(e); + } finally { + if (!isOk) { + if (isActivate) { + LOG.error("Failed to activate resource plan " + name); + } else { + LOG.error("Failed to disable workload management"); + } + // TODO: shut down HS2? 
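Review note: the activate path above hands the applied plan to wm.updateResourcePlanAsync(...) and then blocks on the returned future, rethrowing any failure. A standalone sketch of that wait-and-rethrow pattern with Guava; immediateFuture stands in for the real workload-manager future:

import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class WaitAndRethrowSketch {
  public static void main(String[] args) {
    // Stand-in for wm.updateResourcePlanAsync(appliedResourcePlan).
    ListenableFuture<Boolean> future = Futures.immediateFuture(true);
    try {
      Boolean applied = future.get(); // block until the plan switch finishes
      System.out.println("resource plan applied: " + applied);
    } catch (InterruptedException | ExecutionException e) {
      throw new RuntimeException("resource plan switch failed", e);
    }
  }
}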
+ } + } + } + + TezSessionPoolManager pm = TezSessionPoolManager.getInstance(); + if (pm != null) { + Collection appliedTriggers = pm.updateTriggers(appliedResourcePlan); + LOG.info("Updated tez session pool manager with active resource plan: {} appliedTriggers: {}", name, + appliedTriggers); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java new file mode 100644 index 0000000000..3987a3711b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER MAPPING commands. + */ +@Explain(displayName = "Alter Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMMappingDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = -442968568922083053L; + + static { + DDLTask2.registerOperation(AlterWMMappingDesc.class, AlterWMMappingOperation.class); + } + + private final WMMapping mapping; + + public AlterWMMappingDesc(WMMapping mapping) { + this.mapping = mapping; + } + + @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public WMMapping getMapping() { + return mapping; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java new file mode 100644 index 0000000000..cd873c05f7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of altering a wm mapping. + */ +public class AlterWMMappingOperation extends DDLOperation { + private final AlterWMMappingDesc desc; + + public AlterWMMappingOperation(DDLOperationContext context, AlterWMMappingDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().createOrUpdateWMMapping(desc.getMapping(), true); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java new file mode 100644 index 0000000000..9ce8b2687e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.api.WMNullablePool; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER POOL commands. 
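Review note: the ALTER descriptors carry the WMNullable* Thrift variants (WMNullableResourcePlan above, WMNullablePool here), whose unset fields appear to mean "leave the current value unchanged". The same idea in a plain-Java sketch; the field names are invented and these are not the Thrift classes:

import java.util.Optional;

// Empty Optional = keep the pool's current value for that field.
class AlterPoolRequestSketch {
  Optional<Double> allocFraction = Optional.empty();
  Optional<Integer> queryParallelism = Optional.empty();

  static double merge(Optional<Double> requested, double current) {
    return requested.orElse(current); // unset -> unchanged
  }
}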
+ */ +@Explain(displayName = "Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMPoolDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + static { + DDLTask2.registerOperation(AlterWMPoolDesc.class, AlterWMPoolOperation.class); + } + + private final WMNullablePool pool; + private final String poolPath; + + public AlterWMPoolDesc(WMNullablePool pool, String poolPath) { + this.pool = pool; + this.poolPath = poolPath; + } + + @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public WMNullablePool getPool() { + return pool; + } + + @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java new file mode 100644 index 0000000000..6aebbd1d30 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of altering a wm pool. + */ +public class AlterWMPoolOperation extends DDLOperation { + private final AlterWMPoolDesc desc; + + public AlterWMPoolOperation(DDLOperationContext context, AlterWMPoolDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().alterWMPool(desc.getPool(), desc.getPoolPath()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java index 677a47caca..9301626016 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java @@ -16,33 +16,35 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName="Alter WM Trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterWMTriggerDesc extends DDLDesc implements Serializable { +/** + * DDL task description for ALTER TRIGGER commands. + */ +@Explain(displayName="Alter WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -2105736261687539210L; - private WMTrigger trigger; + static { + DDLTask2.registerOperation(AlterWMTriggerDesc.class, AlterWMTriggerOperation.class); + } - public AlterWMTriggerDesc() {} + private final WMTrigger trigger; public AlterWMTriggerDesc(WMTrigger trigger) { this.trigger = trigger; } - @Explain(displayName="trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public WMTrigger getTrigger() { return trigger; } - - public void setTrigger(WMTrigger trigger) { - this.trigger = trigger; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java new file mode 100644 index 0000000000..6662c95855 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of altering a wm trigger. 
+ */ +public class AlterWMTriggerOperation extends DDLOperation { + private final AlterWMTriggerDesc desc; + + public AlterWMTriggerOperation(DDLOperationContext context, AlterWMTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + WMUtils.validateTrigger(desc.getTrigger()); + context.getDb().alterWMTrigger(desc.getTrigger()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java similarity index 62% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java index 9c18f59d09..f7c3a50efc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java @@ -16,37 +16,46 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +/** + * DDL task description for CREATE RESOURCE PLAN commands. + */ @Explain(displayName = "Create ResourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateResourcePlanDesc extends DDLDesc implements Serializable { +public class CreateResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -3492803425541479414L; - private WMResourcePlan resourcePlan; + static { + DDLTask2.registerOperation(CreateResourcePlanDesc.class, CreateResourcePlanOperation.class); + } + + private final String planName; + private final Integer queryParallelism; private String copyFromName; private boolean ifNotExists; - // For serialization only. 
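Review note: AlterWMTriggerOperation calls WMUtils.validateTrigger before the metastore write, but WMUtils itself is outside these hunks; presumably it rejects triggers whose expressions cannot be parsed. A hypothetical minimal validator, for illustration only:

import org.apache.hadoop.hive.metastore.api.WMTrigger;

final class TriggerValidatorSketch {
  private TriggerValidatorSketch() {}

  // Hypothetical check only; the real validation parses the expression.
  static void validateTrigger(WMTrigger trigger) {
    String expression = trigger.getTriggerExpression();
    if (expression == null || expression.trim().isEmpty()) {
      throw new IllegalArgumentException("Invalid expression for trigger " + trigger.getTriggerName());
    }
  }
}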
- public CreateResourcePlanDesc() { - } - - public CreateResourcePlanDesc(String planName, Integer queryParallelism, String copyFromName, - boolean ifNotExists) { - resourcePlan = new WMResourcePlan(planName); - if (queryParallelism != null) { - resourcePlan.setQueryParallelism(queryParallelism); - } + public CreateResourcePlanDesc(String planName, Integer queryParallelism, String copyFromName, boolean ifNotExists) { + this.planName = planName; + this.queryParallelism = queryParallelism; this.copyFromName = copyFromName; this.ifNotExists = ifNotExists; } - @Explain(displayName="resourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public WMResourcePlan getResourcePlan() { - return resourcePlan; + @Explain(displayName="planName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; + } + + @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getQueryParallelism() { + return queryParallelism; } @Explain(displayName="Copy from", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -59,4 +68,4 @@ public String getCopyFromName() { public boolean getIfNotExists() { return ifNotExists; } -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java new file mode 100644 index 0000000000..872e9983c0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a resource plan. 
+ */ +public class CreateResourcePlanOperation extends DDLOperation { + private final CreateResourcePlanDesc desc; + + public CreateResourcePlanOperation(DDLOperationContext context, CreateResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + WMResourcePlan plan = new WMResourcePlan(desc.getPlanName()); + if (desc.getQueryParallelism() != null) { + plan.setQueryParallelism(desc.getQueryParallelism()); + } + + context.getDb().createResourcePlan(plan, desc.getCopyFromName(), desc.getIfNotExists()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java similarity index 62% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java index 148e73212c..886ad99be7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java @@ -15,44 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Create/Alter Mapping", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateOrAlterWMMappingDesc extends DDLDesc implements Serializable { +/** + * DDL task description for CREATE MAPPING commands. + */ +@Explain(displayName = "Create Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMMappingDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -442968568922083053L; - private WMMapping mapping; - private boolean update; + static { + DDLTask2.registerOperation(CreateWMMappingDesc.class, CreateWMMappingOperation.class); + } - public CreateOrAlterWMMappingDesc() {} + private final WMMapping mapping; - public CreateOrAlterWMMappingDesc(WMMapping mapping, boolean update) { + public CreateWMMappingDesc(WMMapping mapping) { this.mapping = mapping; - this.update = update; } @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public WMMapping getMapping() { return mapping; } - - public void setMapping(WMMapping mapping) { - this.mapping = mapping; - } - - @Explain(displayName = "update", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public boolean isUpdate() { - return update; - } - - public void setUpdate(boolean update) { - this.update = update; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java new file mode 100644 index 0000000000..71046ca7b0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
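Review note: CreateResourcePlanDesc no longer assembles the Thrift object in its constructor; the descriptor now carries only planName and queryParallelism, and CreateResourcePlanOperation.execute() builds the WMResourcePlan, as shown above. The assembly step in isolation:

import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

final class PlanAssemblySketch {
  // Mirrors the execute() body above: the name is required, parallelism optional.
  static WMResourcePlan toPlan(String planName, Integer queryParallelism) {
    WMResourcePlan plan = new WMResourcePlan(planName);
    if (queryParallelism != null) {
      plan.setQueryParallelism(queryParallelism);
    }
    return plan;
  }
}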
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a wm mapping. + */ +public class CreateWMMappingOperation extends DDLOperation { + private final CreateWMMappingDesc desc; + + public CreateWMMappingOperation(DDLOperationContext context, CreateWMMappingDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().createOrUpdateWMMapping(desc.getMapping(), false); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java new file mode 100644 index 0000000000..93c38d4f7d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for CREATE POOL commands. 
+ */ +@Explain(displayName = "Create Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMPoolDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + static { + DDLTask2.registerOperation(CreateWMPoolDesc.class, CreateWMPoolOperation.class); + } + + private final WMPool pool; + + public CreateWMPoolDesc(WMPool pool) { + this.pool = pool; + } + + @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public WMPool getPool() { + return pool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java new file mode 100644 index 0000000000..6936ad7c5b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a wm pool. + */ +public class CreateWMPoolOperation extends DDLOperation { + private final CreateWMPoolDesc desc; + + public CreateWMPoolOperation(DDLOperationContext context, CreateWMPoolDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().createWMPool(desc.getPool()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java index 8eb97291ff..e6d9435ede 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java @@ -16,33 +16,35 @@ * limitations under the License. 
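
Since CreateWMPoolOperation above simply forwards desc.getPool() to Hive.createWMPool, the interesting work is building the thrift WMPool that the desc now wraps. A hedged sketch follows, using the metastore thrift constructor and setters; the plan name, pool path, and values are made up for illustration.

import org.apache.hadoop.hive.metastore.api.WMPool;

class CreateWMPoolExample {
  // Hypothetical pool definition; mirrors what analyzeCreatePool assembles
  // before wrapping it in a CreateWMPoolDesc and a DDLWork2. Assumes the
  // example sits in the same package as CreateWMPoolDesc.
  static CreateWMPoolDesc examplePoolDesc() {
    WMPool pool = new WMPool("daily_plan", "etl.high_priority"); // required thrift fields
    pool.setAllocFraction(0.5);      // fraction of cluster resources for the pool
    pool.setQueryParallelism(4);     // analyzeCreatePool rejects a pool without this
    pool.setSchedulingPolicy("fair");
    return new CreateWMPoolDesc(pool);
  }
}
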
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName="Create WM Trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateWMTriggerDesc extends DDLDesc implements Serializable { +/** + * DDL task description for CREATE TRIGGER commands. + */ +@Explain(displayName="Create WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1705317739121300923L; - private WMTrigger trigger; + static { + DDLTask2.registerOperation(CreateWMTriggerDesc.class, CreateWMTriggerOperation.class); + } - public CreateWMTriggerDesc() {} + private final WMTrigger trigger; public CreateWMTriggerDesc(WMTrigger trigger) { this.trigger = trigger; } - @Explain(displayName="trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public WMTrigger getTrigger() { return trigger; } - - public void setTrigger(WMTrigger trigger) { - this.trigger = trigger; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java new file mode 100644 index 0000000000..13b60103f5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a wm trigger. 
+ */ +public class CreateWMTriggerOperation extends DDLOperation { + private final CreateWMTriggerDesc desc; + + public CreateWMTriggerOperation(DDLOperationContext context, CreateWMTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + WMUtils.validateTrigger(desc.getTrigger()); + context.getDb().createWMTrigger(desc.getTrigger()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java similarity index 73% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java index efaf0789b0..ef7c723524 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java @@ -16,29 +16,32 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +/** + * DDL task description for DROP RESOURCE PLAN commands. + */ @Explain(displayName = "Drop Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropResourcePlanDesc extends DDLDesc implements Serializable { +public class DropResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1258596919510047766L; - private String rpName; - private boolean ifExists; - - public DropResourcePlanDesc() {} + private final String planName; + private final boolean ifExists; - public DropResourcePlanDesc(String rpName, boolean ifExists) { - this.setRpName(rpName); - this.setIfExists(ifExists); + public DropResourcePlanDesc(String planName, boolean ifExists) { + this.planName = planName; + this.ifExists = ifExists; } @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getRpName() { - return rpName; + public String getPlanName() { + return planName; } @Explain(displayName="ifExists", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, @@ -46,13 +49,4 @@ public String getRpName() { public boolean getIfExists() { return ifExists; } - - public void setRpName(String rpName) { - this.rpName = rpName; - } - - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java new file mode 100644 index 0000000000..622ff879b0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
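
CreateWMTriggerOperation above is the one creation path that validates before calling the metastore: WMUtils.validateTrigger (defined later in this patch) round-trips the thrift object through ExecutionTrigger.fromWMTrigger, so a malformed expression fails the DDL task instead of being persisted. A sketch of a trigger that would pass the check, with illustrative expression strings; the two-arg constructor takes the required plan and trigger names.

import org.apache.hadoop.hive.metastore.api.WMTrigger;

class CreateWMTriggerExample {
  // Hypothetical trigger: names and expression strings are illustrative only.
  static WMTrigger exampleTrigger() {
    WMTrigger trigger = new WMTrigger("daily_plan", "kill_slow");
    trigger.setTriggerExpression("ELAPSED_TIME > 100000"); // illustrative counter comparison
    trigger.setActionExpression("KILL");                   // illustrative action
    return trigger;
  }
}
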
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a resource plan. + */ +public class DropResourcePlanOperation extends DDLOperation { + private final DropResourcePlanDesc desc; + + public DropResourcePlanOperation(DDLOperationContext context, DropResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropResourcePlan(desc.getPlanName(), desc.getIfExists()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java similarity index 69% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java index 54becd6283..29cf225b6c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java @@ -15,21 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Drop resource plan", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropWMMappingDesc extends DDLDesc implements Serializable { +/** + * DDL task description for DROP MAPPING commands. 
+ */ +@Explain(displayName = "Drop mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMMappingDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -1567558687529244218L; - private WMMapping mapping; + static { + DDLTask2.registerOperation(DropWMMappingDesc.class, DropWMMappingOperation.class); + } - public DropWMMappingDesc() {} + private final WMMapping mapping; public DropWMMappingDesc(WMMapping mapping) { this.mapping = mapping; @@ -39,8 +46,4 @@ public DropWMMappingDesc(WMMapping mapping) { public WMMapping getMapping() { return mapping; } - - public void setMapping(WMMapping mapping) { - this.mapping = mapping; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java new file mode 100644 index 0000000000..fec372e57e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a wm mapping. + */ +public class DropWMMappingOperation extends DDLOperation { + private final DropWMMappingDesc desc; + + public DropWMMappingOperation(DDLOperationContext context, DropWMMappingDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMMapping(desc.getMapping()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java index cad6c1e396..87f79510cc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java @@ -15,36 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; -public class DropWMPoolDesc extends DDLDesc implements Serializable { +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; + +/** + * DDL task description for DROP POOL commands. + */ +public class DropWMPoolDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -2608462103392563252L; - private String resourcePlanName; - private String poolPath; + static { + DDLTask2.registerOperation(DropWMPoolDesc.class, DropWMPoolOperation.class); + } - public DropWMPoolDesc() {} + private final String planName; + private final String poolPath; - public DropWMPoolDesc(String resourcePlanName, String poolPath) { - this.resourcePlanName = resourcePlanName; + public DropWMPoolDesc(String planName, String poolPath) { + this.planName = planName; this.poolPath = poolPath; } - public String getResourcePlanName() { - return resourcePlanName; - } - - public void setResourcePlanName(String resourcePlanName) { - this.resourcePlanName = resourcePlanName; + public String getPlanName() { + return planName; } public String getPoolPath() { return poolPath; } - - public void setPoolPath(String poolPath) { - this.poolPath = poolPath; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java new file mode 100644 index 0000000000..0ae1e025a9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a wm pool. 
+ */ +public class DropWMPoolOperation extends DDLOperation { + private final DropWMPoolDesc desc; + + public DropWMPoolOperation(DDLOperationContext context, DropWMPoolDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMPool(desc.getPlanName(), desc.getPoolPath()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java index da7c18310c..089b78a8a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java @@ -16,44 +16,41 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName="Drop WM Trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropWMTriggerDesc extends DDLDesc implements Serializable { +/** + * DDL task description for DROP TRIGGER commands. + */ +@Explain(displayName="Drop WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 963803766313787632L; - private String rpName; - private String triggerName; + static { + DDLTask2.registerOperation(DropWMTriggerDesc.class, DropWMTriggerOperation.class); + } - public DropWMTriggerDesc() {} + private final String planName; + private final String triggerName; - public DropWMTriggerDesc(String rpName, String triggerName) { - this.rpName = rpName; + public DropWMTriggerDesc(String planName, String triggerName) { + this.planName = planName; this.triggerName = triggerName; } - @Explain(displayName="resourcePlanName", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getRpName() { - return rpName; - } - - public void setRpName(String rpName) { - this.rpName = rpName; + @Explain(displayName="resourcePlanName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; } - @Explain(displayName="triggerName", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="triggerName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTriggerName() { return triggerName; } - - public void setTriggerName(String triggerName) { - this.triggerName = triggerName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java new file mode 100644 index 0000000000..fa2507853d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a wm trigger. + */ +public class DropWMTriggerOperation extends DDLOperation { + private final DropWMTriggerDesc desc; + + public DropWMTriggerOperation(DDLOperationContext context, DropWMTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMTrigger(desc.getPlanName(), desc.getTriggerName()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java similarity index 59% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java index 370f0d9e78..1f4b5cfd71 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java @@ -16,53 +16,48 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Show Resource plans", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowResourcePlanDesc extends DDLDesc implements Serializable { +/** + * DDL task description for SHOW RESOURCE PLAN(S) commands. + */ +@Explain(displayName = "Show Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 6076076933035978545L; - private static final String TABLE = "show_resourceplan"; + static { + DDLTask2.registerOperation(ShowResourcePlanDesc.class, ShowResourcePlanOperation.class); + } + private static final String ALL_SCHEMA = "rp_name,status,query_parallelism#string,string,int"; private static final String SINGLE_SCHEMA = "line#string"; - String resFile; - String resourcePlanName; - - // For serialization only. 
- public ShowResourcePlanDesc() {} - - public ShowResourcePlanDesc(String rpName, Path resFile) { - this.resourcePlanName = rpName; - this.resFile = resFile.toString(); - } - - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } + private final String planName; + private final String resFile; - public void setResFile(String resFile) { + public ShowResourcePlanDesc(String planName, String resFile) { + this.planName = planName; this.resFile = resFile; } - @Explain(displayName="resourcePlanName", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getResourcePlanName() { - return resourcePlanName; + return planName; } - public String getTable() { - return TABLE; + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; } - public String getSchema(String rpName) { - return (rpName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; + public String getSchema() { + return (planName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java new file mode 100644 index 0000000000..49a50d3e6c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing resource plans. + */ +public class ShowResourcePlanOperation extends DDLOperation { + private final ShowResourcePlanDesc desc; + + public ShowResourcePlanOperation(DDLOperationContext context, ShowResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + // TODO: Enhance showResourcePlan to display all the pools, triggers and mappings. 
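+ // The try-with-resources that follows closes the result stream automatically,
+ // where the old DDLTask.showResourcePlans had to closeStream in a finally block.
+ // With a plan name, the formatter emits the full plan as single-column
+ // "line#string" text; with no name, it emits the rp_name/status/query_parallelism
+ // summary of all plans, matching the two schemas ShowResourcePlanDesc.getSchema()
+ // switches between.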
+ try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + String planName = desc.getResourcePlanName(); + if (planName != null) { + context.getFormatter().showFullResourcePlan(out, context.getDb().getResourcePlan(planName)); + } else { + context.getFormatter().showResourcePlans(out, context.getDb().getAllResourcePlans()); + } + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java new file mode 100644 index 0000000000..4860ee7dcd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; + +/** + * Common utilities for Workload Management related ddl operations. + */ +final class WMUtils { + private WMUtils() { + throw new UnsupportedOperationException("WMUtils should not be instantiated"); + } + + static void validateTrigger(WMTrigger trigger) throws HiveException { + try { + ExecutionTrigger.fromWMTrigger(trigger); + } catch (IllegalArgumentException e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java new file mode 100644 index 0000000000..8e314243d0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Workload Management related DDL operation descriptions and operations. 
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 13d7d6fcdb..7c9d910c20 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -25,7 +25,6 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; @@ -35,13 +34,11 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.concurrent.ExecutionException; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; @@ -76,11 +73,6 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.TxnInfo; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -90,9 +82,7 @@ import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.ArchiveUtils.PartSpecInfo; -import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; import org.apache.hadoop.hive.ql.exec.tez.TezTask; -import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -126,25 +116,14 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; -import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; -import 
org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.plan.KillQueryDesc; @@ -162,13 +141,11 @@ import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; @@ -362,54 +339,6 @@ public int execute(DriverContext driverContext) { return killQuery(db, killQueryDesc); } - if (work.getCreateResourcePlanDesc() != null) { - return createResourcePlan(db, work.getCreateResourcePlanDesc()); - } - - if (work.getShowResourcePlanDesc() != null) { - return showResourcePlans(db, work.getShowResourcePlanDesc()); - } - - if (work.getAlterResourcePlanDesc() != null) { - return alterResourcePlan(db, work.getAlterResourcePlanDesc()); - } - - if (work.getDropResourcePlanDesc() != null) { - return dropResourcePlan(db, work.getDropResourcePlanDesc()); - } - - if (work.getCreateWMTriggerDesc() != null) { - return createWMTrigger(db, work.getCreateWMTriggerDesc()); - } - - if (work.getAlterWMTriggerDesc() != null) { - return alterWMTrigger(db, work.getAlterWMTriggerDesc()); - } - - if (work.getDropWMTriggerDesc() != null) { - return dropWMTrigger(db, work.getDropWMTriggerDesc()); - } - - if (work.getWmPoolDesc() != null) { - return createOrAlterWMPool(db, work.getWmPoolDesc()); - } - - if (work.getDropWMPoolDesc() != null) { - return dropWMPool(db, work.getDropWMPoolDesc()); - } - - if (work.getWmMappingDesc() != null) { - return createOrAlterWMMapping(db, work.getWmMappingDesc()); - } - - if (work.getDropWMMappingDesc() != null) { - return dropWMMapping(db, work.getDropWMMappingDesc()); - } - - if (work.getTriggerToPoolMappingDesc() != null) { - return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc()); - } - if (work.getReplSetFirstIncLoadFlagDesc() != null) { return remFirstIncPendFlag(db, work.getReplSetFirstIncLoadFlagDesc()); } @@ -421,192 +350,6 @@ public int execute(DriverContext driverContext) { return 0; } - private int createResourcePlan(Hive db, CreateResourcePlanDesc createResourcePlanDesc) - throws HiveException { - db.createResourcePlan(createResourcePlanDesc.getResourcePlan(), - createResourcePlanDesc.getCopyFromName(), createResourcePlanDesc.getIfNotExists()); - return 0; - } - - private int showResourcePlans(Hive db, ShowResourcePlanDesc showResourcePlanDesc) - throws HiveException { - // Note: Enhance showResourcePlan to display all the pools, triggers and mappings. 
- DataOutputStream out = getOutputStream(showResourcePlanDesc.getResFile()); - try { - String rpName = showResourcePlanDesc.getResourcePlanName(); - if (rpName != null) { - formatter.showFullResourcePlan(out, db.getResourcePlan(rpName)); - } else { - formatter.showResourcePlans(out, db.getAllResourcePlans()); - } - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(out); - } - return 0; - } - - // Note: the resource plan operations are going to be annotated with namespace based on the config - // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see - // that there exist other namespaces, because one HS2 always operates inside just one and we - // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about - // namespaces - Hive.java will transparently scope everything. That's the idea anyway. - private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException { - if (desc.shouldValidate()) { - WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName()); - try (DataOutputStream out = getOutputStream(desc.getResFile())) { - formatter.showErrors(out, result); - } catch (IOException e) { - throw new HiveException(e); - }; - return 0; - } - - WMNullableResourcePlan resourcePlan = desc.getResourcePlan(); - final WorkloadManager wm = WorkloadManager.getInstance(); - final TezSessionPoolManager pm = TezSessionPoolManager.getInstance(); - boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); - if (resourcePlan.getStatus() != null) { - isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE; - } - - WMFullResourcePlan appliedRp = db.alterResourcePlan(desc.getResourcePlanName(), resourcePlan, - desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace()); - boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate(); - if (!mustHaveAppliedChange && !desc.isReplace()) { - return 0; // The modification cannot affect an active plan. - } - if (appliedRp == null && !mustHaveAppliedChange) { - return 0; // Replacing an inactive plan. - } - if (wm == null && isInTest) { - return 0; // Skip for tests if WM is not present. - } - - if ((appliedRp == null) != desc.isForceDeactivate()) { - throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable"); - // TODO: shut down HS2? - } - assert appliedRp == null || appliedRp.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE; - - handleWorkloadManagementServiceChange(wm, pm, isActivate, appliedRp); - return 0; - } - - private int handleWorkloadManagementServiceChange(WorkloadManager wm, TezSessionPoolManager pm, - boolean isActivate, WMFullResourcePlan appliedRp) throws HiveException { - String name = null; - if (isActivate) { - name = appliedRp.getPlan().getName(); - LOG.info("Activating a new resource plan " + name + ": " + appliedRp); - } else { - LOG.info("Disabling workload management"); - } - if (wm != null) { - // Note: as per our current constraints, the behavior of two parallel activates is - // undefined; although only one will succeed and the other will receive exception. - // We need proper (semi-)transactional modifications to support this without hacks. - ListenableFuture future = wm.updateResourcePlanAsync(appliedRp); - boolean isOk = false; - try { - // Note: we may add an async option in future. For now, let the task fail for the user. 
- future.get(); - isOk = true; - if (isActivate) { - LOG.info("Successfully activated resource plan " + name); - } else { - LOG.info("Successfully disabled workload management"); - } - } catch (InterruptedException | ExecutionException e) { - throw new HiveException(e); - } finally { - if (!isOk) { - if (isActivate) { - LOG.error("Failed to activate resource plan " + name); - } else { - LOG.error("Failed to disable workload management"); - } - // TODO: shut down HS2? - } - } - } - if (pm != null) { - Collection appliedTriggers = pm.updateTriggers(appliedRp); - LOG.info("Updated tez session pool manager with active resource plan: {} appliedTriggers: {}", name, appliedTriggers); - } - return 0; - } - - private int dropResourcePlan(Hive db, DropResourcePlanDesc desc) throws HiveException { - db.dropResourcePlan(desc.getRpName(), desc.getIfExists()); - return 0; - } - - private int createWMTrigger(Hive db, CreateWMTriggerDesc desc) throws HiveException { - validateTrigger(desc.getTrigger()); - db.createWMTrigger(desc.getTrigger()); - return 0; - } - - private void validateTrigger(final WMTrigger trigger) throws HiveException { - try { - ExecutionTrigger.fromWMTrigger(trigger); - } catch (IllegalArgumentException e) { - throw new HiveException(e); - } - } - - private int alterWMTrigger(Hive db, AlterWMTriggerDesc desc) throws HiveException { - validateTrigger(desc.getTrigger()); - db.alterWMTrigger(desc.getTrigger()); - return 0; - } - - private int dropWMTrigger(Hive db, DropWMTriggerDesc desc) throws HiveException { - db.dropWMTrigger(desc.getRpName(), desc.getTriggerName()); - return 0; - } - - private int createOrAlterWMPool(Hive db, CreateOrAlterWMPoolDesc desc) throws HiveException { - if (desc.isUpdate()) { - db.alterWMPool(desc.getAlterPool(), desc.getPoolPath()); - } else { - db.createWMPool(desc.getCreatePool()); - } - return 0; - } - - private int dropWMPool(Hive db, DropWMPoolDesc desc) throws HiveException { - db.dropWMPool(desc.getResourcePlanName(), desc.getPoolPath()); - return 0; - } - - private int createOrAlterWMMapping(Hive db, CreateOrAlterWMMappingDesc desc) throws HiveException { - db.createOrUpdateWMMapping(desc.getMapping(), desc.isUpdate()); - return 0; - } - - private int dropWMMapping(Hive db, DropWMMappingDesc desc) throws HiveException { - db.dropWMMapping(desc.getMapping()); - return 0; - } - - private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolMappingDesc desc) - throws HiveException { - if (!desc.isUnmanagedPool()) { - db.createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), - desc.getPoolPath(), desc.shouldDrop()); - } else { - assert desc.getPoolPath() == null; - WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); - // If we are dropping from unmanaged, unset the flag; and vice versa - trigger.setIsInUnmanaged(!desc.shouldDrop()); - db.alterWMTrigger(trigger); - } - return 0; - } - private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException { boolean failed = true; HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook(); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index d2c3f7b1a6..bd6ae715d6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -97,6 +97,21 @@ import 
org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc; import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolAddTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolDropTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -128,29 +143,18 @@ import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; -import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -168,7 +172,6 @@ import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import 
org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -925,10 +928,9 @@ private void analyzeCreateResourcePlan(ASTNode ast) throws SemanticException { default: throw new SemanticException("Invalid create arguments " + ast.toStringTree()); } } - CreateResourcePlanDesc desc = new CreateResourcePlanDesc( - resourcePlanName, queryParallelism, likeName, ifNotExists); + CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism, likeName, ifNotExists); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { @@ -939,11 +941,10 @@ private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { if (ast.getChildCount() > 1) { throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement"); } - ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile()); + ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile().toString()); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc))); - setFetchTask(createFetchTask(showResourcePlanDesc.getSchema(rpName))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showResourcePlanDesc))); + setFetchTask(createFetchTask(showResourcePlanDesc.getSchema())); } private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { @@ -958,10 +959,9 @@ private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { case HiveParser.TOK_DISABLE: WMNullableResourcePlan anyRp = new WMNullableResourcePlan(); anyRp.setStatus(WMResourcePlanStatus.ENABLED); - AlterResourcePlanDesc desc = new AlterResourcePlanDesc( - anyRp, null, false, false, true, false); + AlterResourcePlanDesc desc = new AlterResourcePlanDesc(anyRp, null, false, false, true, false, null); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); return; default: // Continue to handle changes to a specific plan. 
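// Note on the TOK_DISABLE branch above: disabling workload management means
// demoting whichever plan is currently ACTIVE back to ENABLED, which is why the
// WMNullableResourcePlan is built with status ENABLED and the fifth constructor
// argument (forceDeactivate, per the old desc's isForceDeactivate) is true. The
// trailing null is the new resFile constructor parameter, which only the
// VALIDATE path fills in.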
} @@ -1051,16 +1051,17 @@ private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { "Unexpected token in alter resource plan statement: " + child.getType()); } } - AlterResourcePlanDesc desc = new AlterResourcePlanDesc( - resourcePlan, rpName, validate, isEnableActivate, false, isReplace); + String resFile = null; if (validate) { ctx.setResFile(ctx.getLocalTmpPath()); - desc.setResFile(ctx.getResFile().toString()); + resFile = ctx.getResFile().toString(); } + AlterResourcePlanDesc desc = new AlterResourcePlanDesc(resourcePlan, rpName, validate, isEnableActivate, false, + isReplace, resFile); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); if (validate) { - setFetchTask(createFetchTask(AlterResourcePlanDesc.getSchema())); + setFetchTask(createFetchTask(AlterResourcePlanDesc.SCHEMA)); } } @@ -1081,8 +1082,7 @@ private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException { } DropResourcePlanDesc desc = new DropResourcePlanDesc(rpName, ifExists); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateTrigger(ASTNode ast) throws SemanticException { @@ -1100,7 +1100,7 @@ private void analyzeCreateTrigger(ASTNode ast) throws SemanticException { CreateWMTriggerDesc desc = new CreateWMTriggerDesc(trigger); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private String buildTriggerExpression(ASTNode ast) throws SemanticException { @@ -1156,7 +1156,7 @@ private void analyzeAlterTrigger(ASTNode ast) throws SemanticException { AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeDropTrigger(ASTNode ast) throws SemanticException { @@ -1168,8 +1168,7 @@ private void analyzeDropTrigger(ASTNode ast) throws SemanticException { DropWMTriggerDesc desc = new DropWMTriggerDesc(rpName, triggerName); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreatePool(ASTNode ast) throws SemanticException { @@ -1210,10 +1209,9 @@ private void analyzeCreatePool(ASTNode ast) throws SemanticException { if (!pool.isSetQueryParallelism()) { throw new SemanticException("query_parallelism should be specified for a pool"); } - CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, false); + CreateWMPoolDesc desc = new CreateWMPoolDesc(pool); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeAlterPool(ASTNode ast) throws SemanticException { @@ -1244,9 +1242,13 @@ private void analyzeAlterPool(ASTNode ast) throws SemanticException { hasTrigger = true; boolean drop = child.getType() == HiveParser.TOK_DROP_TRIGGER; String triggerName = unescapeIdentifier(param.getText()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new 
CreateOrDropTriggerToPoolMappingDesc( - rpName, triggerName, poolPath, drop, isUnmanagedPool)))); + if (drop) { + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), + new AlterPoolDropTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); + } else { + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), + new AlterPoolAddTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); + } } else { if (isUnmanagedPool) { throw new SemanticException("Cannot alter the unmanaged pool"); @@ -1282,8 +1284,8 @@ private void analyzeAlterPool(ASTNode ast) throws SemanticException { if (!poolChanges.isSetPoolPath()) { poolChanges.setPoolPath(poolPath); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new CreateOrAlterWMPoolDesc(poolChanges, poolPath, true)))); + AlterWMPoolDesc ddlDesc = new AlterWMPoolDesc(poolChanges, poolPath); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), ddlDesc))); } } @@ -1296,8 +1298,7 @@ private void analyzeDropPool(ASTNode ast) throws SemanticException { DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws SemanticException { @@ -1316,9 +1317,14 @@ private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws Sem mapping.setOrdering(Integer.valueOf(ast.getChild(4).getText())); } - CreateOrAlterWMMappingDesc desc = new CreateOrAlterWMMappingDesc(mapping, update); + org.apache.hadoop.hive.ql.ddl.DDLDesc desc = null; + if (update) { + desc = new AlterWMMappingDesc(mapping); + } else { + desc = new CreateWMMappingDesc(mapping); + } addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeDropMapping(ASTNode ast) throws SemanticException { @@ -1331,7 +1337,7 @@ private void analyzeDropMapping(ASTNode ast) throws SemanticException { DropWMMappingDesc desc = new DropWMMappingDesc(new WMMapping(rpName, entityType, entityName)); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { @@ -4195,51 +4201,6 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) alterTblDesc))); } - /** - * Analyze skewed column names - * - * @param skewedColNames - * @param child - * @return - * @throws SemanticException - */ - private List analyzeAlterTableSkewedColNames(List skewedColNames, - ASTNode child) throws SemanticException { - Tree nNode = child.getChild(0); - if (nNode == null) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - ASTNode nAstNode = (ASTNode) nNode; - if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - skewedColNames = getColumnNames(nAstNode); - } - } - return skewedColNames; - } - - /** - * Given a ASTNode, return list of values. 
-   *
-   * use case:
-   * create table xyz list bucketed (col1) with skew (1,2,5)
-   * AST Node is for (1,2,5)
-   *
-   * @param ast
-   * @return
-   */
-  private List<String> getColumnValues(ASTNode ast) {
-    List<String> colList = new ArrayList<String>();
-    int numCh = ast.getChildCount();
-    for (int i = 0; i < numCh; i++) {
-      ASTNode child = (ASTNode) ast.getChild(i);
-      colList.add(stripQuotes(child.getText()).toLowerCase());
-    }
-    return colList;
-  }
-
   /**
    * Analyze alter table's skewed location
    *
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java
deleted file mode 100644
index 53f1f71399..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-@Explain(displayName = "Create/Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class CreateOrAlterWMPoolDesc extends DDLDesc implements Serializable {
-  private static final long serialVersionUID = 4872940135771213510L;
-
-  private WMPool createPool;
-  private WMNullablePool alterPool;
-  private String poolPath;
-  private boolean update;
-
-  public CreateOrAlterWMPoolDesc() {}
-
-  public CreateOrAlterWMPoolDesc(WMPool pool, String poolPath, boolean update) {
-    this.createPool = pool;
-    this.poolPath = poolPath;
-    this.update = update;
-  }
-
-  public CreateOrAlterWMPoolDesc(WMNullablePool pool, String poolPath, boolean update) {
-    this.alterPool = pool;
-    this.poolPath = poolPath;
-    this.update = update;
-  }
-
-  @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public Object getPool() {
-    return createPool == null ? alterPool : createPool;
-  }
-
-  public WMPool getCreatePool() {
-    return createPool;
-  }
-
-  public WMNullablePool getAlterPool() {
-    return alterPool;
-  }
-
-  public void setCreatePool(WMPool pool) {
-    this.createPool = pool;
-  }
-
-  public void setAlterPool(WMNullablePool pool) {
-    this.alterPool = pool;
-  }
-
-  @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getPoolPath() {
-    return poolPath;
-  }
-
-  public void setPoolPath(String poolPath) {
-    this.poolPath = poolPath;
-  }
-
-  @Explain(displayName="isUpdate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public boolean isUpdate() {
-    return update;
-  }
-
-  public void setUpdate(boolean update) {
-    this.update = update;
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index e6f3a6f917..1901defa27 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -52,23 +52,6 @@
 
   private ShowConfDesc showConfDesc;
 
-  private CreateResourcePlanDesc createResourcePlanDesc;
-  private ShowResourcePlanDesc showResourcePlanDesc;
-  private DropResourcePlanDesc dropResourcePlanDesc;
-  private AlterResourcePlanDesc alterResourcePlanDesc;
-
-  private CreateWMTriggerDesc createWMTriggerDesc;
-  private AlterWMTriggerDesc alterWMTriggerDesc;
-  private DropWMTriggerDesc dropWMTriggerDesc;
-
-  private CreateOrAlterWMPoolDesc wmPoolDesc;
-  private DropWMPoolDesc dropWMPoolDesc;
-
-  private CreateOrAlterWMMappingDesc wmMappingDesc;
-  private DropWMMappingDesc dropWMMappingDesc;
-
-  private CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc;
-
   private ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc;
 
   boolean needLock = false;
@@ -241,78 +224,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
     this.killQueryDesc = killQueryDesc;
   }
 
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateResourcePlanDesc createResourcePlanDesc) {
-    this(inputs, outputs);
-    this.createResourcePlanDesc = createResourcePlanDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowResourcePlanDesc showResourcePlanDesc) {
-    this(inputs, outputs);
-    this.showResourcePlanDesc = showResourcePlanDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DropResourcePlanDesc dropResourcePlanDesc) {
-    this(inputs, outputs);
-    this.dropResourcePlanDesc = dropResourcePlanDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      AlterResourcePlanDesc alterResourcePlanDesc) {
-    this(inputs, outputs);
-    this.alterResourcePlanDesc = alterResourcePlanDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateWMTriggerDesc createWMTriggerDesc) {
-    this(inputs, outputs);
-    this.createWMTriggerDesc = createWMTriggerDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      AlterWMTriggerDesc alterWMTriggerDesc) {
-    this(inputs, outputs);
-    this.alterWMTriggerDesc = alterWMTriggerDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DropWMTriggerDesc dropWMTriggerDesc) {
-    this(inputs, outputs);
-    this.dropWMTriggerDesc = dropWMTriggerDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateOrAlterWMPoolDesc wmPoolDesc) {
-    this(inputs, outputs);
-    this.wmPoolDesc = wmPoolDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DropWMPoolDesc dropWMPoolDesc) {
-    this(inputs, outputs);
-    this.dropWMPoolDesc = dropWMPoolDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateOrAlterWMMappingDesc wmMappingDesc) {
-    this(inputs, outputs);
-    this.wmMappingDesc = wmMappingDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DropWMMappingDesc dropWMMappingDesc) {
-    this(inputs, outputs);
-    this.dropWMMappingDesc = dropWMMappingDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) {
-    this(inputs, outputs);
-    this.triggerToPoolMappingDesc = triggerToPoolMappingDesc;
-  }
-
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc) {
     this(inputs, outputs);
@@ -461,60 +372,6 @@ public InsertCommitHookDesc getInsertCommitHookDesc() {
     return insertCommitHookDesc;
   }
 
-  @Explain(displayName = "Create resource plan")
-  public CreateResourcePlanDesc getCreateResourcePlanDesc() {
-    return createResourcePlanDesc;
-  }
-
-  @Explain(displayName = "Show resource plan")
-  public ShowResourcePlanDesc getShowResourcePlanDesc() {
-    return showResourcePlanDesc;
-  }
-
-  public DropResourcePlanDesc getDropResourcePlanDesc() {
-    return dropResourcePlanDesc;
-  }
-
-  public AlterResourcePlanDesc getAlterResourcePlanDesc() {
-    return alterResourcePlanDesc;
-  }
-
-  public CreateWMTriggerDesc getCreateWMTriggerDesc() {
-    return createWMTriggerDesc;
-  }
-
-  public AlterWMTriggerDesc getAlterWMTriggerDesc() {
-    return alterWMTriggerDesc;
-  }
-
-  public DropWMTriggerDesc getDropWMTriggerDesc() {
-    return dropWMTriggerDesc;
-  }
-
-  public CreateOrAlterWMPoolDesc getWmPoolDesc() {
-    return wmPoolDesc;
-  }
-
-  public DropWMPoolDesc getDropWMPoolDesc() {
-    return dropWMPoolDesc;
-  }
-
-  public CreateOrAlterWMMappingDesc getWmMappingDesc() {
-    return wmMappingDesc;
-  }
-
-  public DropWMMappingDesc getDropWMMappingDesc() {
-    return dropWMMappingDesc;
-  }
-
-  public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() {
-    return triggerToPoolMappingDesc;
-  }
-
-  public void setTriggerToPoolMappingDesc(CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) {
-    this.triggerToPoolMappingDesc = triggerToPoolMappingDesc;
-  }
-
   public ReplRemoveFirstIncLoadPendFlagDesc getReplSetFirstIncLoadFlagDesc() {
     return replSetFirstIncLoadFlagDesc;
   }