diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
index a2f49b7503..98f1fbf540 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
@@ -36,9 +36,9 @@
   boolean needLock = false;
 
   /** ReadEntitites that are passed to the hooks. */
-  protected Set<ReadEntity> inputs;
+  private Set<ReadEntity> inputs;
   /** List of WriteEntities that are passed to the hooks. */
-  protected Set<WriteEntity> outputs;
+  private Set<WriteEntity> outputs;
 
   public DDLWork2() {
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java
index 6782b02d20..57f8b46818 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/CreateRoleOperation.java
@@ -37,7 +37,7 @@ public CreateRoleOperation(DDLOperationContext context, CreateRoleDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     authorizer.createRole(desc.getName(), null);
     return 0;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java
index e8b55ecf4c..8f33bd31ed 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/DropRoleOperation.java
@@ -37,7 +37,7 @@ public DropRoleOperation(DDLOperationContext context, DropRoleDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     authorizer.dropRole(desc.getName());
     return 0;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java
index 633ac434e0..041987d53e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantOperation.java
@@ -43,15 +43,15 @@ public GrantOperation(DDLOperationContext context, GrantDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
 
     //Convert to object types used by the authorization plugin interface
     List<HivePrincipal> hivePrincipals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(),
-        RoleUtils.getAuthorizationTranslator(authorizer));
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     List<HivePrivilege> hivePrivileges = AuthorizationUtils.getHivePrivileges(desc.getPrivileges(),
-        RoleUtils.getAuthorizationTranslator(authorizer));
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     HivePrivilegeObject hivePrivilegeObject =
-        RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject());
+        PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject());
 
     HivePrincipal grantorPrincipal = new HivePrincipal(desc.getGrantor(),
         AuthorizationUtils.getHivePrincipalType(desc.getGrantorType()));
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java
index 19abe2794d..acb3a21d73 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/GrantRoleOperation.java
@@ -42,10 +42,10 @@ public GrantRoleOperation(DDLOperationContext context, GrantRoleDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
 
-    List<HivePrincipal> principals =
-        AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), RoleUtils.getAuthorizationTranslator(authorizer));
+    List<HivePrincipal> principals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(),
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     HivePrincipal grantorPrincipal = null;
     if (desc.getGrantor() != null) {
       grantorPrincipal =
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java
similarity index 92%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java
index cfbc4cf620..ad431454de 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RoleUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/PrivilegeUtils.java
@@ -33,11 +33,11 @@
 import org.apache.hadoop.hive.ql.session.SessionState;
 
 /**
- * Common utilities for Role related ddl operations.
+ * Common utilities for Privilege related ddl operations.
  */
-final class RoleUtils {
-  private RoleUtils() {
-    throw new UnsupportedOperationException("RoleUtils should not be instantiated");
+final class PrivilegeUtils {
+  private PrivilegeUtils() {
+    throw new UnsupportedOperationException("PrivilegeUtils should not be instantiated");
   }
 
   static HiveAuthorizer getSessionAuthorizer(HiveConf conf) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java
index bf4e01a191..62d79651a0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeOperation.java
@@ -43,15 +43,15 @@ public RevokeOperation(DDLOperationContext context, RevokeDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
 
     //Convert to object types used by the authorization plugin interface
     List<HivePrincipal> hivePrincipals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(),
-        RoleUtils.getAuthorizationTranslator(authorizer));
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     List<HivePrivilege> hivePrivileges = AuthorizationUtils.getHivePrivileges(desc.getPrivileges(),
-        RoleUtils.getAuthorizationTranslator(authorizer));
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     HivePrivilegeObject hivePrivilegeObject =
-        RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject());
+        PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getPrivilegeSubject());
 
     HivePrincipal grantorPrincipal = new HivePrincipal(null, null);
     authorizer.revokePrivileges(hivePrincipals, hivePrivileges, hivePrivilegeObject, grantorPrincipal,
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java
index 0b3b27695d..ecd664b1ed 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/RevokeRoleOperation.java
@@ -42,10 +42,10 @@ public RevokeRoleOperation(DDLOperationContext context, RevokeRoleDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
 
-    List<HivePrincipal> principals =
-        AuthorizationUtils.getHivePrincipals(desc.getPrincipals(), RoleUtils.getAuthorizationTranslator(authorizer));
+    List<HivePrincipal> principals = AuthorizationUtils.getHivePrincipals(desc.getPrincipals(),
+        PrivilegeUtils.getAuthorizationTranslator(authorizer));
     HivePrincipal grantorPrincipal = null;
     if (desc.getGrantor() != null) {
       grantorPrincipal =
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java
index d119fe4a28..590e015630 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/SetRoleOperation.java
@@ -37,7 +37,7 @@ public SetRoleOperation(DDLOperationContext context, SetRoleDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     authorizer.setCurrentRole(desc.getName());
     return 0;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java
index 9738ddbcc0..9e83ece15c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowCurrentRoleOperation.java
@@ -40,9 +40,9 @@ public ShowCurrentRoleOperation(DDLOperationContext context, ShowCurrentRoleDesc
 
   @Override
   public int execute() throws HiveException, IOException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     List<String> roleNames = authorizer.getCurrentRoleNames();
-    RoleUtils.writeListToFileAfterSort(roleNames, desc.getResFile(), context);
+    PrivilegeUtils.writeListToFileAfterSort(roleNames, desc.getResFile(), context);
     return 0;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java
index 50b41800a1..0affff03a0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowGrantOperation.java
@@ -48,11 +48,11 @@ public ShowGrantOperation(DDLOperationContext context, ShowGrantDesc desc) {
 
   @Override
   public int execute() throws HiveException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     try {
       List<HivePrivilegeInfo> privInfos = authorizer.showPrivileges(
-          RoleUtils.getAuthorizationTranslator(authorizer).getHivePrincipal(desc.getPrincipalDesc()),
-          RoleUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getHiveObj()));
+          PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrincipal(desc.getPrincipalDesc()),
+          PrivilegeUtils.getAuthorizationTranslator(authorizer).getHivePrivilegeObject(desc.getHiveObj()));
       boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
       DDLUtils.writeToFile(writeGrantInfo(privInfos, testMode), desc.getResFile(), context);
     } catch (IOException e) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java
index 392142ba14..73f1030d76 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowPrincipalsOperation.java
@@ -44,7 +44,7 @@ public ShowPrincipalsOperation(DDLOperationContext context, ShowPrincipalsDesc d
 
   @Override
   public int execute() throws HiveException, IOException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
     List<HiveRoleGrant> roleGrants = authorizer.getPrincipalGrantInfoForRole(desc.getName());
     DDLUtils.writeToFile(writeHiveRoleGrantInfo(roleGrants, testMode), desc.getResFile(), context);
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java
index 178ea8e3bc..e3e4e1409c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRoleGrantOperation.java
@@ -45,7 +45,7 @@ public ShowRoleGrantOperation(DDLOperationContext context, ShowRoleGrantDesc des
 
   @Override
   public int execute() throws HiveException, IOException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     boolean testMode = context.getConf().getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
     List<HiveRoleGrant> roles = authorizer.getRoleGrantInfoForPrincipal(
         AuthorizationUtils.getHivePrincipal(desc.getName(), desc.getPrincipalType()));
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java
index 22ca7f350d..0597eaffe2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/privilege/ShowRolesOperation.java
@@ -40,9 +40,9 @@ public ShowRolesOperation(DDLOperationContext context, ShowRolesDesc desc) {
 
   @Override
   public int execute() throws HiveException, IOException {
-    HiveAuthorizer authorizer = RoleUtils.getSessionAuthorizer(context.getConf());
+    HiveAuthorizer authorizer = PrivilegeUtils.getSessionAuthorizer(context.getConf());
     List<String> allRoles = authorizer.getAllRoles();
-    RoleUtils.writeListToFileAfterSort(allRoles, desc.getResFile(), context);
+    PrivilegeUtils.writeListToFileAfterSort(allRoles, desc.getResFile(), context);
     return 0;
   }
 }
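[Editorial note on the pattern used throughout the remainder of this patch: every new or renamed *Desc class registers its matching *Operation class with DDLTask2 in a static initializer, so the task can look up and instantiate the right operation for a given description at execution time. The sketch below illustrates that idiom only; except for the shape of the registerOperation call, all names in it are invented for the example and are not code from this patch.]

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative stand-ins for the DDLDesc/DDLOperation pair; only the
// static-block registration idiom itself mirrors the patch.
interface ExampleDesc { }

abstract class ExampleOperation {
  abstract int execute() throws Exception;
}

final class ExampleRegistry {
  // Desc class -> Operation class, filled in by static initializers as Desc classes load.
  private static final Map<Class<? extends ExampleDesc>, Class<? extends ExampleOperation>> MAP =
      new ConcurrentHashMap<>();

  static void registerOperation(Class<? extends ExampleDesc> descClass,
      Class<? extends ExampleOperation> operationClass) {
    MAP.put(descClass, operationClass);
  }

  static Class<? extends ExampleOperation> operationFor(Class<? extends ExampleDesc> descClass) {
    return MAP.get(descClass);
  }
}

class ExampleCreateDesc implements ExampleDesc {
  static {
    // Same shape as: DDLTask2.registerOperation(CreateWMPoolDesc.class, CreateWMPoolOperation.class);
    ExampleRegistry.registerOperation(ExampleCreateDesc.class, ExampleCreateOperation.class);
  }
}

class ExampleCreateOperation extends ExampleOperation {
  @Override int execute() { return 0; }
}

One caveat of this idiom: the mapping only exists once the Desc class has actually been loaded, so the executing task must ensure each description class is initialized before attempting the lookup.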
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java
new file mode 100644
index 0000000000..0479e0ad56
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerDesc.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for ALTER POOL ... ADD TRIGGER commands.
+ */
+@Explain(displayName = "Create Trigger to pool mappings", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterPoolAddTriggerDesc implements DDLDesc, Serializable {
+  private static final long serialVersionUID = 383046258694558029L;
+
+  static {
+    DDLTask2.registerOperation(AlterPoolAddTriggerDesc.class, AlterPoolAddTriggerOperation.class);
+  }
+
+  private final String planName;
+  private final String triggerName;
+  private final String poolPath;
+  private final boolean isUnmanagedPool;
+
+  public AlterPoolAddTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) {
+    this.planName = planName;
+    this.triggerName = triggerName;
+    this.poolPath = poolPath;
+    this.isUnmanagedPool = isUnmanagedPool;
+  }
+
+  @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPlanName() {
+    return planName;
+  }
+
+  @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getTriggerName() {
+    return triggerName;
+  }
+
+  @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPoolPathForExplain() {
+    return isUnmanagedPool ? "<unmanaged queries>" : poolPath;
+  }
+
+  public String getPoolPath() {
+    return poolPath;
+  }
+
+  public boolean isUnmanagedPool() {
+    return isUnmanagedPool;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java
new file mode 100644
index 0000000000..e6bee58205
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolAddTriggerOperation.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of adding a trigger to a pool.
+ */
+public class AlterPoolAddTriggerOperation extends DDLOperation {
+  private final AlterPoolAddTriggerDesc desc;
+
+  public AlterPoolAddTriggerOperation(DDLOperationContext context, AlterPoolAddTriggerDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    if (!desc.isUnmanagedPool()) {
+      context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(),
+          false);
+    } else {
+      assert desc.getPoolPath() == null;
+      WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName());
+      // Adding a trigger to the unmanaged pool: set the isInUnmanaged flag on the trigger itself.
+      trigger.setIsInUnmanaged(true);
+      context.getDb().alterWMTrigger(trigger);
+    }
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java
new file mode 100644
index 0000000000..ecd800fec3
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerDesc.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for ALTER POOL ... DROP TRIGGER commands.
+ */
+@Explain(displayName = "Drop Trigger to pool mappings", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterPoolDropTriggerDesc implements DDLDesc, Serializable {
+  private static final long serialVersionUID = 383046258694558029L;
+
+  static {
+    DDLTask2.registerOperation(AlterPoolDropTriggerDesc.class, AlterPoolDropTriggerOperation.class);
+  }
+
+  private final String planName;
+  private final String triggerName;
+  private final String poolPath;
+  private final boolean isUnmanagedPool;
+
+  public AlterPoolDropTriggerDesc(String planName, String triggerName, String poolPath, boolean isUnmanagedPool) {
+    this.planName = planName;
+    this.triggerName = triggerName;
+    this.poolPath = poolPath;
+    this.isUnmanagedPool = isUnmanagedPool;
+  }
+
+  @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPlanName() {
+    return planName;
+  }
+
+  @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getTriggerName() {
+    return triggerName;
+  }
+
+  @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPoolPathForExplain() {
+    return isUnmanagedPool ? "<unmanaged queries>" : poolPath;
+  }
+
+  public String getPoolPath() {
+    return poolPath;
+  }
+
+  public boolean isUnmanagedPool() {
+    return isUnmanagedPool;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java
new file mode 100644
index 0000000000..fc894051b3
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterPoolDropTriggerOperation.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of dropping a trigger-to-pool mapping.
+ */
+public class AlterPoolDropTriggerOperation extends DDLOperation {
+  private final AlterPoolDropTriggerDesc desc;
+
+  public AlterPoolDropTriggerOperation(DDLOperationContext context, AlterPoolDropTriggerDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    if (!desc.isUnmanagedPool()) {
+      context.getDb().createOrDropTriggerToPoolMapping(desc.getPlanName(), desc.getTriggerName(), desc.getPoolPath(),
+          true);
+    } else {
+      assert desc.getPoolPath() == null;
+      WMTrigger trigger = new WMTrigger(desc.getPlanName(), desc.getTriggerName());
+      // Dropping a trigger from the unmanaged pool: unset the isInUnmanaged flag on the trigger itself.
+      trigger.setIsInUnmanaged(false);
+      context.getDb().alterWMTrigger(trigger);
+    }
+
+    return 0;
+  }
+}
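[Editorial note for readers of the two trigger operations above: both follow the same branch structure and differ only in the drop flag and the unmanaged marker value. A condensed, hedged restatement follows; the helper class and method names are invented for illustration, while the Hive/metastore calls are exactly the ones used in the patch.]

import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

final class TriggerPoolMembershipSketch {
  // add == true corresponds to AlterPoolAddTriggerOperation, add == false to the drop operation.
  static void change(Hive db, String planName, String triggerName, String poolPath,
      boolean unmanagedPool, boolean add) throws HiveException {
    if (!unmanagedPool) {
      // Managed pool: maintain an explicit trigger-to-pool mapping row (the drop flag is the inverse of 'add').
      db.createOrDropTriggerToPoolMapping(planName, triggerName, poolPath, !add);
    } else {
      // Unmanaged pool: there is no mapping row; membership is tracked as a flag on the trigger itself.
      WMTrigger trigger = new WMTrigger(planName, triggerName);
      trigger.setIsInUnmanaged(add);
      db.alterWMTrigger(trigger);
    }
  }
}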
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java
similarity index 53%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java
index dd2eaced23..4e7a699f5b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanDesc.java
@@ -16,101 +16,77 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
 
 import java.io.Serializable;
 
 import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-@Explain(displayName = "Alter Resource plans",
-    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterResourcePlanDesc extends DDLDesc implements Serializable {
+/**
+ * DDL task description for ALTER RESOURCE PLAN commands.
+ */
+@Explain(displayName = "Alter Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterResourcePlanDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = -3514685833183437279L;
 
-  private WMNullableResourcePlan resourcePlan;
-  private String rpName;
-  private boolean validate;
-  private boolean isEnableActivate, isForceDeactivate, isReplace;
-  private String resFile;
+  static {
+    DDLTask2.registerOperation(AlterResourcePlanDesc.class, AlterResourcePlanOperation.class);
+  }
+
+  public static final String SCHEMA = "error#string";
 
-  public AlterResourcePlanDesc() {}
+  private final WMNullableResourcePlan resourcePlan;
+  private final String planName;
+  private final boolean validate;
+  private final boolean isEnableActivate;
+  private final boolean isForceDeactivate;
+  private final boolean isReplace;
+  private final String resFile;
 
-  public AlterResourcePlanDesc(WMNullableResourcePlan resourcePlan, String rpName, boolean validate,
-      boolean isEnableActivate, boolean isForceDeactivate, boolean isReplace) {
+  public AlterResourcePlanDesc(WMNullableResourcePlan resourcePlan, String planName, boolean validate,
+      boolean isEnableActivate, boolean isForceDeactivate, boolean isReplace, String resFile) {
     this.resourcePlan = resourcePlan;
-    this.rpName = rpName;
+    this.planName = planName;
     this.validate = validate;
     this.isEnableActivate = isEnableActivate;
    this.isForceDeactivate = isForceDeactivate;
     this.isReplace = isReplace;
+    this.resFile = resFile;
   }
 
-  @Explain(displayName="Resource plan changed fields",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  @Explain(displayName="Resource plan changed fields", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public WMNullableResourcePlan getResourcePlan() {
     return resourcePlan;
   }
 
-  public void setResourcePlan(WMNullableResourcePlan resourcePlan) {
-    this.resourcePlan = resourcePlan;
-  }
-
-  @Explain(displayName="Resource plan to modify",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getResourcePlanName() {
-    return rpName;
-  }
-
-  public void setResourcePlanName(String rpName) {
-    this.rpName = rpName;
+  @Explain(displayName="Resource plan to modify", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPlanName() {
+    return planName;
   }
 
-  @Explain(displayName="shouldValidate",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  @Explain(displayName="shouldValidate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public boolean shouldValidate() {
     return validate;
   }
 
-  public void setValidate(boolean validate) {
-    this.validate = validate;
-  }
-
   public boolean isEnableActivate() {
     return isEnableActivate;
   }
 
-  public void setIsEnableActivate(boolean b) {
-    this.isEnableActivate = b;
-  }
-
   public boolean isForceDeactivate() {
     return isForceDeactivate;
   }
 
-  public void setIsForceDeactivate(boolean b) {
-    this.isForceDeactivate = b;
-  }
-
   public boolean isReplace() {
     return isReplace;
   }
 
-  public void setIsReplace(boolean b) {
-    this.isReplace = b;
-  }
-
   @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
   public String getResFile() {
     return resFile;
   }
-
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  public static String getSchema() {
-    return "error#string";
-  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java
new file mode 100644
index 0000000000..89fa0a3032
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterResourcePlanOperation.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
+import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Operation process of altering a resource plan.
+ */
+public class AlterResourcePlanOperation extends DDLOperation {
+  private final AlterResourcePlanDesc desc;
+
+  // Note: the resource plan operations are going to be annotated with namespace based on the config
+  // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see
+  // that there exist other namespaces, because one HS2 always operates inside just one and we
+  // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about
+  // namespaces - Hive.java will transparently scope everything. That's the idea anyway.
+  public AlterResourcePlanOperation(DDLOperationContext context, AlterResourcePlanDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    if (desc.shouldValidate()) {
+      WMValidateResourcePlanResponse result = context.getDb().validateResourcePlan(desc.getPlanName());
+      try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
+        context.getFormatter().showErrors(out, result);
+      } catch (IOException e) {
+        throw new HiveException(e);
+      }
+      return 0;
+    }
+
+    WMNullableResourcePlan resourcePlan = desc.getResourcePlan();
+    WMFullResourcePlan appliedResourcePlan = context.getDb().alterResourcePlan(desc.getPlanName(), resourcePlan,
+        desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace());
+
+    boolean isActivate = resourcePlan.getStatus() != null && resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE;
+    boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate();
+    if (!mustHaveAppliedChange && !desc.isReplace()) {
+      return 0; // The modification cannot affect an active plan.
+    }
+    if (appliedResourcePlan == null && !mustHaveAppliedChange) {
+      return 0; // Replacing an inactive plan.
+    }
+
+    WorkloadManager wm = WorkloadManager.getInstance();
+    boolean isInTest = HiveConf.getBoolVar(context.getConf(), ConfVars.HIVE_IN_TEST);
+    if (wm == null && isInTest) {
+      return 0; // Skip for tests if WM is not present.
+    }
+
+    if ((appliedResourcePlan == null) != desc.isForceDeactivate()) {
+      throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable");
+      // TODO: shut down HS2?
+    }
+    assert appliedResourcePlan == null || appliedResourcePlan.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE;
+
+    handleWorkloadManagementServiceChange(wm, isActivate, appliedResourcePlan);
+
+    return 0;
+  }
+
+  private int handleWorkloadManagementServiceChange(WorkloadManager wm, boolean isActivate,
+      WMFullResourcePlan appliedResourcePlan) throws HiveException {
+    String name = null;
+    if (isActivate) {
+      name = appliedResourcePlan.getPlan().getName();
+      LOG.info("Activating a new resource plan " + name + ": " + appliedResourcePlan);
+    } else {
+      LOG.info("Disabling workload management");
+    }
+
+    if (wm != null) {
+      // Note: as per our current constraints, the behavior of two parallel activates is
+      // undefined, although only one will succeed and the other will receive an exception.
+      // We need proper (semi-)transactional modifications to support this without hacks.
+      ListenableFuture<Boolean> future = wm.updateResourcePlanAsync(appliedResourcePlan);
+      boolean isOk = false;
+      try {
+        // Note: we may add an async option in the future. For now, let the task fail for the user.
+        future.get();
+        isOk = true;
+        if (isActivate) {
+          LOG.info("Successfully activated resource plan " + name);
+        } else {
+          LOG.info("Successfully disabled workload management");
+        }
+      } catch (InterruptedException | ExecutionException e) {
+        throw new HiveException(e);
+      } finally {
+        if (!isOk) {
+          if (isActivate) {
+            LOG.error("Failed to activate resource plan " + name);
+          } else {
+            LOG.error("Failed to disable workload management");
+          }
+          // TODO: shut down HS2?
+        }
+      }
+    }
+
+    TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
+    if (pm != null) {
+      Collection<String> appliedTriggers = pm.updateTriggers(appliedResourcePlan);
+      LOG.info("Updated tez session pool manager with active resource plan: {} appliedTriggers: {}", name,
+          appliedTriggers);
+    }
+
+    return 0;
+  }
+}
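[Editorial note: the early-return ladder in execute() above is the easiest part of this file to misread, so here is the same decision sequence isolated into one boolean-valued function. This is a reading aid only; the class and method names are invented, and only the metastore types and getStatus() accessor come from the patch.]

import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;

final class ApplyDecisionSketch {
  // Returns true when the applied plan (or a force-deactivate) must be pushed to the live WorkloadManager.
  static boolean mustPush(WMNullableResourcePlan requested, WMFullResourcePlan applied,
      boolean forceDeactivate, boolean replace) {
    boolean isActivate = requested.getStatus() != null
        && requested.getStatus() == WMResourcePlanStatus.ACTIVE;
    boolean mustHaveAppliedChange = isActivate || forceDeactivate;
    if (!mustHaveAppliedChange && !replace) {
      return false; // the modification cannot affect the active plan
    }
    if (applied == null && !mustHaveAppliedChange) {
      return false; // replaced an inactive plan; nothing running needs an update
    }
    return true;
  }
}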
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java
similarity index 62%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java
index 148e73212c..641dfa9ef8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingDesc.java
@@ -15,44 +15,35 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
 
 import java.io.Serializable;
 
 import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-@Explain(displayName = "Create/Alter Mapping",
-    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class CreateOrAlterWMMappingDesc extends DDLDesc implements Serializable {
+/**
+ * DDL task description for ALTER ... MAPPING commands.
+ */
+@Explain(displayName = "Alter Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterWMMappingDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = -442968568922083053L;
 
-  private WMMapping mapping;
-  private boolean update;
+  static {
+    DDLTask2.registerOperation(AlterWMMappingDesc.class, AlterWMMappingOperation.class);
+  }
 
-  public CreateOrAlterWMMappingDesc() {}
+  private final WMMapping mapping;
 
-  public CreateOrAlterWMMappingDesc(WMMapping mapping, boolean update) {
+  public AlterWMMappingDesc(WMMapping mapping) {
     this.mapping = mapping;
-    this.update = update;
   }
 
   @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public WMMapping getMapping() {
     return mapping;
   }
-
-  public void setMapping(WMMapping mapping) {
-    this.mapping = mapping;
-  }
-
-  @Explain(displayName = "update",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public boolean isUpdate() {
-    return update;
-  }
-
-  public void setUpdate(boolean update) {
-    this.update = update;
-  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java
new file mode 100644
index 0000000000..847ead543c
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMMappingOperation.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of altering a workload management mapping.
+ */
+public class AlterWMMappingOperation extends DDLOperation {
+  private final AlterWMMappingDesc desc;
+
+  public AlterWMMappingOperation(DDLOperationContext context, AlterWMMappingDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    context.getDb().createOrUpdateWMMapping(desc.getMapping(), true);
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java
new file mode 100644
index 0000000000..9ce8b2687e
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolDesc.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for ALTER POOL commands.
+ */
+@Explain(displayName = "Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterWMPoolDesc implements DDLDesc, Serializable {
+  private static final long serialVersionUID = 4872940135771213510L;
+
+  static {
+    DDLTask2.registerOperation(AlterWMPoolDesc.class, AlterWMPoolOperation.class);
+  }
+
+  private final WMNullablePool pool;
+  private final String poolPath;
+
+  public AlterWMPoolDesc(WMNullablePool pool, String poolPath) {
+    this.pool = pool;
+    this.poolPath = poolPath;
+  }
+
+  @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public WMNullablePool getPool() {
+    return pool;
+  }
+
+  @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPoolPath() {
+    return poolPath;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java
new file mode 100644
index 0000000000..4d835eeb6c
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMPoolOperation.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of altering a workload management pool.
+ */
+public class AlterWMPoolOperation extends DDLOperation {
+  private final AlterWMPoolDesc desc;
+
+  public AlterWMPoolOperation(DDLOperationContext context, AlterWMPoolDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    context.getDb().alterWMPool(desc.getPool(), desc.getPoolPath());
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java
similarity index 63%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java
index 677a47caca..9301626016 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerDesc.java
@@ -16,33 +16,35 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
 
 import java.io.Serializable;
 
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-@Explain(displayName="Alter WM Trigger",
-    explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterWMTriggerDesc extends DDLDesc implements Serializable {
+/**
+ * DDL task description for ALTER TRIGGER commands.
+ */
+@Explain(displayName="Alter WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterWMTriggerDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = -2105736261687539210L;
 
-  private WMTrigger trigger;
+  static {
+    DDLTask2.registerOperation(AlterWMTriggerDesc.class, AlterWMTriggerOperation.class);
+  }
 
-  public AlterWMTriggerDesc() {}
+  private final WMTrigger trigger;
 
   public AlterWMTriggerDesc(WMTrigger trigger) {
     this.trigger = trigger;
   }
 
-  @Explain(displayName="trigger",
-      explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
+  @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
   public WMTrigger getTrigger() {
     return trigger;
   }
-
-  public void setTrigger(WMTrigger trigger) {
-    this.trigger = trigger;
-  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java
new file mode 100644
index 0000000000..fab76d9c25
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/AlterWMTriggerOperation.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of altering a workload management trigger.
+ */
+public class AlterWMTriggerOperation extends DDLOperation {
+  private final AlterWMTriggerDesc desc;
+
+  public AlterWMTriggerOperation(DDLOperationContext context, AlterWMTriggerDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    WMUtils.validateTrigger(desc.getTrigger());
+    context.getDb().alterWMTrigger(desc.getTrigger());
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java
similarity index 62%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java
index 9c18f59d09..f7c3a50efc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanDesc.java
@@ -16,37 +16,46 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
 
 import java.io.Serializable;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
+/**
+ * DDL task description for CREATE RESOURCE PLAN commands.
+ */
 @Explain(displayName = "Create ResourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class CreateResourcePlanDesc extends DDLDesc implements Serializable {
+public class CreateResourcePlanDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = -3492803425541479414L;
 
-  private WMResourcePlan resourcePlan;
+  static {
+    DDLTask2.registerOperation(CreateResourcePlanDesc.class, CreateResourcePlanOperation.class);
+  }
+
+  private final String planName;
+  private final Integer queryParallelism;
   private String copyFromName;
   private boolean ifNotExists;
 
-  // For serialization only.
-  public CreateResourcePlanDesc() {
-  }
-
-  public CreateResourcePlanDesc(String planName, Integer queryParallelism, String copyFromName,
-      boolean ifNotExists) {
-    resourcePlan = new WMResourcePlan(planName);
-    if (queryParallelism != null) {
-      resourcePlan.setQueryParallelism(queryParallelism);
-    }
+  public CreateResourcePlanDesc(String planName, Integer queryParallelism, String copyFromName, boolean ifNotExists) {
+    this.planName = planName;
+    this.queryParallelism = queryParallelism;
     this.copyFromName = copyFromName;
     this.ifNotExists = ifNotExists;
   }
 
-  @Explain(displayName="resourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public WMResourcePlan getResourcePlan() {
-    return resourcePlan;
+  @Explain(displayName="planName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getPlanName() {
+    return planName;
+  }
+
+  @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public Integer getQueryParallelism() {
+    return queryParallelism;
   }
 
   @Explain(displayName="Copy from", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -59,4 +68,4 @@ public String getCopyFromName() {
   public boolean getIfNotExists() {
     return ifNotExists;
   }
-}
\ No newline at end of file
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java
new file mode 100644
index 0000000000..872e9983c0
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateResourcePlanOperation.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of creating a resource plan.
+ */
+public class CreateResourcePlanOperation extends DDLOperation {
+  private final CreateResourcePlanDesc desc;
+
+  public CreateResourcePlanOperation(DDLOperationContext context, CreateResourcePlanDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    WMResourcePlan plan = new WMResourcePlan(desc.getPlanName());
+    if (desc.getQueryParallelism() != null) {
+      plan.setQueryParallelism(desc.getQueryParallelism());
+    }
+
+    context.getDb().createResourcePlan(plan, desc.getCopyFromName(), desc.getIfNotExists());
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java
new file mode 100644
index 0000000000..c1455bcdd6
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingDesc.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for CREATE ... MAPPING commands.
+ */
+@Explain(displayName = "Create Mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class CreateWMMappingDesc implements DDLDesc, Serializable {
+  private static final long serialVersionUID = -442968568922083053L;
+
+  static {
+    DDLTask2.registerOperation(CreateWMMappingDesc.class, CreateWMMappingOperation.class);
+  }
+
+  private final WMMapping mapping;
+
+  public CreateWMMappingDesc(WMMapping mapping) {
+    this.mapping = mapping;
+  }
+
+  @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public WMMapping getMapping() {
+    return mapping;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java
new file mode 100644
index 0000000000..dc629e5a5f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMMappingOperation.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of creating a workload management mapping.
+ */
+public class CreateWMMappingOperation extends DDLOperation {
+  private final CreateWMMappingDesc desc;
+
+  public CreateWMMappingOperation(DDLOperationContext context, CreateWMMappingDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException, IOException {
+    context.getDb().createOrUpdateWMMapping(desc.getMapping(), false);
+
+    return 0;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java
new file mode 100644
index 0000000000..93c38d4f7d
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolDesc.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.workloadmanagement;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for CREATE POOL commands.
+ */ +@Explain(displayName = "Create Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMPoolDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + static { + DDLTask2.registerOperation(CreateWMPoolDesc.class, CreateWMPoolOperation.class); + } + + private final WMPool pool; + + public CreateWMPoolDesc(WMPool pool) { + this.pool = pool; + } + + @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public WMPool getPool() { + return pool; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java new file mode 100644 index 0000000000..75030c869f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMPoolOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a workload management pool. + */ +public class CreateWMPoolOperation extends DDLOperation { + private final CreateWMPoolDesc desc; + + public CreateWMPoolOperation(DDLOperationContext context, CreateWMPoolDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().createWMPool(desc.getPool()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java similarity index 63% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java index 8eb97291ff..e6d9435ede 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerDesc.java @@ -16,33 +16,35 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName="Create WM Trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateWMTriggerDesc extends DDLDesc implements Serializable { +/** + * DDL task description for CREATE TRIGGER commands. + */ +@Explain(displayName="Create WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1705317739121300923L; - private WMTrigger trigger; + static { + DDLTask2.registerOperation(CreateWMTriggerDesc.class, CreateWMTriggerOperation.class); + } - public CreateWMTriggerDesc() {} + private final WMTrigger trigger; public CreateWMTriggerDesc(WMTrigger trigger) { this.trigger = trigger; } - @Explain(displayName="trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public WMTrigger getTrigger() { return trigger; } - - public void setTrigger(WMTrigger trigger) { - this.trigger = trigger; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java new file mode 100644 index 0000000000..1b76faf79d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/CreateWMTriggerOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of creating a workload management trigger. 
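+ * The trigger is validated (via WMUtils.validateTrigger) before it is stored in the metastore.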
+ */ +public class CreateWMTriggerOperation extends DDLOperation { + private final CreateWMTriggerDesc desc; + + public CreateWMTriggerOperation(DDLOperationContext context, CreateWMTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + WMUtils.validateTrigger(desc.getTrigger()); + context.getDb().createWMTrigger(desc.getTrigger()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java similarity index 68% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java index efaf0789b0..d70eeb9a49 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanDesc.java @@ -16,29 +16,37 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; +/** + * DDL task description for DROP RESOURCE PLAN commands. + */ @Explain(displayName = "Drop Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropResourcePlanDesc extends DDLDesc implements Serializable { +public class DropResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1258596919510047766L; - private String rpName; - private boolean ifExists; + static { + DDLTask2.registerOperation(DropResourcePlanDesc.class, DropResourcePlanOperation.class); + } - public DropResourcePlanDesc() {} + private final String planName; + private final boolean ifExists; - public DropResourcePlanDesc(String rpName, boolean ifExists) { - this.setRpName(rpName); - this.setIfExists(ifExists); + public DropResourcePlanDesc(String planName, boolean ifExists) { + this.planName = planName; + this.ifExists = ifExists; } @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getRpName() { - return rpName; + public String getPlanName() { + return planName; } @Explain(displayName="ifExists", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, @@ -46,13 +54,4 @@ public String getRpName() { public boolean getIfExists() { return ifExists; } - - public void setRpName(String rpName) { - this.rpName = rpName; - } - - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java new file mode 100644 index 0000000000..622ff879b0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropResourcePlanOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a resource plan. + */ +public class DropResourcePlanOperation extends DDLOperation { + private final DropResourcePlanDesc desc; + + public DropResourcePlanOperation(DDLOperationContext context, DropResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropResourcePlan(desc.getPlanName(), desc.getIfExists()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java similarity index 69% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java index 54becd6283..7083937481 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingDesc.java @@ -15,21 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Drop resource plan", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropWMMappingDesc extends DDLDesc implements Serializable { +/** + * DDL task description for DROP ... MAPPING commands. 
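+ * The mapping to drop is identified by its resource plan name, entity type (e.g. a user or group) and entity name.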
+ */ +@Explain(displayName = "Drop mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMMappingDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -1567558687529244218L; - private WMMapping mapping; + static { + DDLTask2.registerOperation(DropWMMappingDesc.class, DropWMMappingOperation.class); + } - public DropWMMappingDesc() {} + private final WMMapping mapping; public DropWMMappingDesc(WMMapping mapping) { this.mapping = mapping; @@ -39,8 +46,4 @@ public DropWMMappingDesc(WMMapping mapping) { public WMMapping getMapping() { return mapping; } - - public void setMapping(WMMapping mapping) { - this.mapping = mapping; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java new file mode 100644 index 0000000000..d1172069db --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMMappingOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a workload management mapping. + */ +public class DropWMMappingOperation extends DDLOperation { + private final DropWMMappingDesc desc; + + public DropWMMappingOperation(DDLOperationContext context, DropWMMappingDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMMapping(desc.getMapping()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java index cad6c1e396..4034130535 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolDesc.java @@ -15,36 +15,40 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; -public class DropWMPoolDesc extends DDLDesc implements Serializable { +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for DROP POOL commands. + */ +@Explain(displayName="Drop WM Pool", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMPoolDesc implements DDLDesc, Serializable { private static final long serialVersionUID = -2608462103392563252L; - private String resourcePlanName; - private String poolPath; + static { + DDLTask2.registerOperation(DropWMPoolDesc.class, DropWMPoolOperation.class); + } - public DropWMPoolDesc() {} + private final String planName; + private final String poolPath; - public DropWMPoolDesc(String resourcePlanName, String poolPath) { - this.resourcePlanName = resourcePlanName; + public DropWMPoolDesc(String planName, String poolPath) { + this.planName = planName; this.poolPath = poolPath; } - public String getResourcePlanName() { - return resourcePlanName; - } - - public void setResourcePlanName(String resourcePlanName) { - this.resourcePlanName = resourcePlanName; + @Explain(displayName="resourcePlanName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; } public String getPoolPath() { return poolPath; } - - public void setPoolPath(String poolPath) { - this.poolPath = poolPath; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java new file mode 100644 index 0000000000..707f3231d0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMPoolOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a workload management pool. 
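+ * The pool is identified by the resource plan name and its path within the plan's pool hierarchy.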
+ */ +public class DropWMPoolOperation extends DDLOperation { + private final DropWMPoolDesc desc; + + public DropWMPoolOperation(DDLOperationContext context, DropWMPoolDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMPool(desc.getPlanName(), desc.getPoolPath()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java index da7c18310c..089b78a8a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMTriggerDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerDesc.java @@ -16,44 +16,41 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName="Drop WM Trigger", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropWMTriggerDesc extends DDLDesc implements Serializable { +/** + * DDL task description for DROP TRIGGER commands. + */ +@Explain(displayName="Drop WM Trigger", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMTriggerDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 963803766313787632L; - private String rpName; - private String triggerName; + static { + DDLTask2.registerOperation(DropWMTriggerDesc.class, DropWMTriggerOperation.class); + } - public DropWMTriggerDesc() {} + private final String planName; + private final String triggerName; - public DropWMTriggerDesc(String rpName, String triggerName) { - this.rpName = rpName; + public DropWMTriggerDesc(String planName, String triggerName) { + this.planName = planName; this.triggerName = triggerName; } - @Explain(displayName="resourcePlanName", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getRpName() { - return rpName; - } - - public void setRpName(String rpName) { - this.rpName = rpName; + @Explain(displayName="resourcePlanName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPlanName() { + return planName; } - @Explain(displayName="triggerName", - explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="triggerName", explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTriggerName() { return triggerName; } - - public void setTriggerName(String triggerName) { - this.triggerName = triggerName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java new file mode 100644 index 0000000000..0218688e00 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/DropWMTriggerOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a workload management trigger. + */ +public class DropWMTriggerOperation extends DDLOperation { + private final DropWMTriggerDesc desc; + + public DropWMTriggerOperation(DDLOperationContext context, DropWMTriggerDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + context.getDb().dropWMTrigger(desc.getPlanName(), desc.getTriggerName()); + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java similarity index 59% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java index 370f0d9e78..1f4b5cfd71 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanDesc.java @@ -16,53 +16,48 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; import java.io.Serializable; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -@Explain(displayName = "Show Resource plans", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowResourcePlanDesc extends DDLDesc implements Serializable { +/** + * DDL task description for SHOW RESOURCE PLAN(S) commands. + */ +@Explain(displayName = "Show Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowResourcePlanDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 6076076933035978545L; - private static final String TABLE = "show_resourceplan"; + static { + DDLTask2.registerOperation(ShowResourcePlanDesc.class, ShowResourcePlanOperation.class); + } + private static final String ALL_SCHEMA = "rp_name,status,query_parallelism#string,string,int"; private static final String SINGLE_SCHEMA = "line#string"; - String resFile; - String resourcePlanName; - - // For serialization only. 
- public ShowResourcePlanDesc() {} - - public ShowResourcePlanDesc(String rpName, Path resFile) { - this.resourcePlanName = rpName; - this.resFile = resFile.toString(); - } - - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } + private final String planName; + private final String resFile; - public void setResFile(String resFile) { + public ShowResourcePlanDesc(String planName, String resFile) { + this.planName = planName; this.resFile = resFile; } - @Explain(displayName="resourcePlanName", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getResourcePlanName() { - return resourcePlanName; + return planName; } - public String getTable() { - return TABLE; + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; } - public String getSchema(String rpName) { - return (rpName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; + public String getSchema() { + return (planName == null) ? ALL_SCHEMA : SINGLE_SCHEMA; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java new file mode 100644 index 0000000000..49a50d3e6c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/ShowResourcePlanOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of showing resource plans. + */ +public class ShowResourcePlanOperation extends DDLOperation { + private final ShowResourcePlanDesc desc; + + public ShowResourcePlanOperation(DDLOperationContext context, ShowResourcePlanDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException, IOException { + // TODO: Enhance showResourcePlan to display all the pools, triggers and mappings. 
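+ // If a plan name was given, the full plan is written; otherwise all resource plans are listed in short form.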
+ try (DataOutputStream out = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + String planName = desc.getResourcePlanName(); + if (planName != null) { + context.getFormatter().showFullResourcePlan(out, context.getDb().getResourcePlan(planName)); + } else { + context.getFormatter().showResourcePlans(out, context.getDb().getAllResourcePlans()); + } + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java new file mode 100644 index 0000000000..4860ee7dcd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/WMUtils.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; + +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; + +/** + * Common utilities for Workload Management related ddl operations. + */ +final class WMUtils { + private WMUtils() { + throw new UnsupportedOperationException("WMUtils should not be instantiated"); + } + + static void validateTrigger(WMTrigger trigger) throws HiveException { + try { + ExecutionTrigger.fromWMTrigger(trigger); + } catch (IllegalArgumentException e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java new file mode 100644 index 0000000000..8e314243d0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/workloadmanagement/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Workload Management related DDL operation descriptions and operations. 
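+ * Covers resource plans, pools, triggers and mappings.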
*/ +package org.apache.hadoop.hive.ql.ddl.workloadmanagement; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 13d7d6fcdb..7c9d910c20 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -25,7 +25,6 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; @@ -35,13 +34,11 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.concurrent.ExecutionException; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; @@ -76,11 +73,6 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.TxnInfo; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -90,9 +82,7 @@ import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.ArchiveUtils.PartSpecInfo; -import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; import org.apache.hadoop.hive.ql.exec.tez.TezTask; -import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -126,25 +116,14 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; -import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; -import 
org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.plan.KillQueryDesc; @@ -162,13 +141,11 @@ import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.wm.ExecutionTrigger; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; @@ -362,54 +339,6 @@ public int execute(DriverContext driverContext) { return killQuery(db, killQueryDesc); } - if (work.getCreateResourcePlanDesc() != null) { - return createResourcePlan(db, work.getCreateResourcePlanDesc()); - } - - if (work.getShowResourcePlanDesc() != null) { - return showResourcePlans(db, work.getShowResourcePlanDesc()); - } - - if (work.getAlterResourcePlanDesc() != null) { - return alterResourcePlan(db, work.getAlterResourcePlanDesc()); - } - - if (work.getDropResourcePlanDesc() != null) { - return dropResourcePlan(db, work.getDropResourcePlanDesc()); - } - - if (work.getCreateWMTriggerDesc() != null) { - return createWMTrigger(db, work.getCreateWMTriggerDesc()); - } - - if (work.getAlterWMTriggerDesc() != null) { - return alterWMTrigger(db, work.getAlterWMTriggerDesc()); - } - - if (work.getDropWMTriggerDesc() != null) { - return dropWMTrigger(db, work.getDropWMTriggerDesc()); - } - - if (work.getWmPoolDesc() != null) { - return createOrAlterWMPool(db, work.getWmPoolDesc()); - } - - if (work.getDropWMPoolDesc() != null) { - return dropWMPool(db, work.getDropWMPoolDesc()); - } - - if (work.getWmMappingDesc() != null) { - return createOrAlterWMMapping(db, work.getWmMappingDesc()); - } - - if (work.getDropWMMappingDesc() != null) { - return dropWMMapping(db, work.getDropWMMappingDesc()); - } - - if (work.getTriggerToPoolMappingDesc() != null) { - return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc()); - } - if (work.getReplSetFirstIncLoadFlagDesc() != null) { return remFirstIncPendFlag(db, work.getReplSetFirstIncLoadFlagDesc()); } @@ -421,192 +350,6 @@ public int execute(DriverContext driverContext) { return 0; } - private int createResourcePlan(Hive db, CreateResourcePlanDesc createResourcePlanDesc) - throws HiveException { - db.createResourcePlan(createResourcePlanDesc.getResourcePlan(), - createResourcePlanDesc.getCopyFromName(), createResourcePlanDesc.getIfNotExists()); - return 0; - } - - private int showResourcePlans(Hive db, ShowResourcePlanDesc showResourcePlanDesc) - throws HiveException { - // Note: Enhance showResourcePlan to display all the pools, triggers and mappings. 
- DataOutputStream out = getOutputStream(showResourcePlanDesc.getResFile()); - try { - String rpName = showResourcePlanDesc.getResourcePlanName(); - if (rpName != null) { - formatter.showFullResourcePlan(out, db.getResourcePlan(rpName)); - } else { - formatter.showResourcePlans(out, db.getAllResourcePlans()); - } - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(out); - } - return 0; - } - - // Note: the resource plan operations are going to be annotated with namespace based on the config - // inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see - // that there exist other namespaces, because one HS2 always operates inside just one and we - // don't want this complexity to bleed everywhere. Therefore, this code doesn't care about - // namespaces - Hive.java will transparently scope everything. That's the idea anyway. - private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException { - if (desc.shouldValidate()) { - WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName()); - try (DataOutputStream out = getOutputStream(desc.getResFile())) { - formatter.showErrors(out, result); - } catch (IOException e) { - throw new HiveException(e); - }; - return 0; - } - - WMNullableResourcePlan resourcePlan = desc.getResourcePlan(); - final WorkloadManager wm = WorkloadManager.getInstance(); - final TezSessionPoolManager pm = TezSessionPoolManager.getInstance(); - boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); - if (resourcePlan.getStatus() != null) { - isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE; - } - - WMFullResourcePlan appliedRp = db.alterResourcePlan(desc.getResourcePlanName(), resourcePlan, - desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace()); - boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate(); - if (!mustHaveAppliedChange && !desc.isReplace()) { - return 0; // The modification cannot affect an active plan. - } - if (appliedRp == null && !mustHaveAppliedChange) { - return 0; // Replacing an inactive plan. - } - if (wm == null && isInTest) { - return 0; // Skip for tests if WM is not present. - } - - if ((appliedRp == null) != desc.isForceDeactivate()) { - throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable"); - // TODO: shut down HS2? - } - assert appliedRp == null || appliedRp.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE; - - handleWorkloadManagementServiceChange(wm, pm, isActivate, appliedRp); - return 0; - } - - private int handleWorkloadManagementServiceChange(WorkloadManager wm, TezSessionPoolManager pm, - boolean isActivate, WMFullResourcePlan appliedRp) throws HiveException { - String name = null; - if (isActivate) { - name = appliedRp.getPlan().getName(); - LOG.info("Activating a new resource plan " + name + ": " + appliedRp); - } else { - LOG.info("Disabling workload management"); - } - if (wm != null) { - // Note: as per our current constraints, the behavior of two parallel activates is - // undefined; although only one will succeed and the other will receive exception. - // We need proper (semi-)transactional modifications to support this without hacks. - ListenableFuture future = wm.updateResourcePlanAsync(appliedRp); - boolean isOk = false; - try { - // Note: we may add an async option in future. For now, let the task fail for the user. 
- future.get(); - isOk = true; - if (isActivate) { - LOG.info("Successfully activated resource plan " + name); - } else { - LOG.info("Successfully disabled workload management"); - } - } catch (InterruptedException | ExecutionException e) { - throw new HiveException(e); - } finally { - if (!isOk) { - if (isActivate) { - LOG.error("Failed to activate resource plan " + name); - } else { - LOG.error("Failed to disable workload management"); - } - // TODO: shut down HS2? - } - } - } - if (pm != null) { - Collection appliedTriggers = pm.updateTriggers(appliedRp); - LOG.info("Updated tez session pool manager with active resource plan: {} appliedTriggers: {}", name, appliedTriggers); - } - return 0; - } - - private int dropResourcePlan(Hive db, DropResourcePlanDesc desc) throws HiveException { - db.dropResourcePlan(desc.getRpName(), desc.getIfExists()); - return 0; - } - - private int createWMTrigger(Hive db, CreateWMTriggerDesc desc) throws HiveException { - validateTrigger(desc.getTrigger()); - db.createWMTrigger(desc.getTrigger()); - return 0; - } - - private void validateTrigger(final WMTrigger trigger) throws HiveException { - try { - ExecutionTrigger.fromWMTrigger(trigger); - } catch (IllegalArgumentException e) { - throw new HiveException(e); - } - } - - private int alterWMTrigger(Hive db, AlterWMTriggerDesc desc) throws HiveException { - validateTrigger(desc.getTrigger()); - db.alterWMTrigger(desc.getTrigger()); - return 0; - } - - private int dropWMTrigger(Hive db, DropWMTriggerDesc desc) throws HiveException { - db.dropWMTrigger(desc.getRpName(), desc.getTriggerName()); - return 0; - } - - private int createOrAlterWMPool(Hive db, CreateOrAlterWMPoolDesc desc) throws HiveException { - if (desc.isUpdate()) { - db.alterWMPool(desc.getAlterPool(), desc.getPoolPath()); - } else { - db.createWMPool(desc.getCreatePool()); - } - return 0; - } - - private int dropWMPool(Hive db, DropWMPoolDesc desc) throws HiveException { - db.dropWMPool(desc.getResourcePlanName(), desc.getPoolPath()); - return 0; - } - - private int createOrAlterWMMapping(Hive db, CreateOrAlterWMMappingDesc desc) throws HiveException { - db.createOrUpdateWMMapping(desc.getMapping(), desc.isUpdate()); - return 0; - } - - private int dropWMMapping(Hive db, DropWMMappingDesc desc) throws HiveException { - db.dropWMMapping(desc.getMapping()); - return 0; - } - - private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolMappingDesc desc) - throws HiveException { - if (!desc.isUnmanagedPool()) { - db.createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), - desc.getPoolPath(), desc.shouldDrop()); - } else { - assert desc.getPoolPath() == null; - WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName()); - // If we are dropping from unmanaged, unset the flag; and vice versa - trigger.setIsInUnmanaged(!desc.shouldDrop()); - db.alterWMTrigger(trigger); - } - return 0; - } - private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException { boolean failed = true; HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook(); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index d2c3f7b1a6..bd6ae715d6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -97,6 +97,21 @@ import 
org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc; import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolAddTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolDropTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.CreateWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropResourcePlanDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMMappingDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMPoolDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.DropWMTriggerDesc; +import org.apache.hadoop.hive.ql.ddl.workloadmanagement.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -128,29 +143,18 @@ import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; -import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; -import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; -import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; -import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -168,7 +172,6 @@ import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import 
org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -925,10 +928,9 @@ private void analyzeCreateResourcePlan(ASTNode ast) throws SemanticException { default: throw new SemanticException("Invalid create arguments " + ast.toStringTree()); } } - CreateResourcePlanDesc desc = new CreateResourcePlanDesc( - resourcePlanName, queryParallelism, likeName, ifNotExists); + CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism, likeName, ifNotExists); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { @@ -939,11 +941,10 @@ private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { if (ast.getChildCount() > 1) { throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement"); } - ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile()); + ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile().toString()); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc))); - setFetchTask(createFetchTask(showResourcePlanDesc.getSchema(rpName))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showResourcePlanDesc))); + setFetchTask(createFetchTask(showResourcePlanDesc.getSchema())); } private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { @@ -958,10 +959,9 @@ private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { case HiveParser.TOK_DISABLE: WMNullableResourcePlan anyRp = new WMNullableResourcePlan(); anyRp.setStatus(WMResourcePlanStatus.ENABLED); - AlterResourcePlanDesc desc = new AlterResourcePlanDesc( - anyRp, null, false, false, true, false); + AlterResourcePlanDesc desc = new AlterResourcePlanDesc(anyRp, null, false, false, true, false, null); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); return; default: // Continue to handle changes to a specific plan. 
} @@ -1051,16 +1051,17 @@ private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { "Unexpected token in alter resource plan statement: " + child.getType()); } } - AlterResourcePlanDesc desc = new AlterResourcePlanDesc( - resourcePlan, rpName, validate, isEnableActivate, false, isReplace); + String resFile = null; if (validate) { ctx.setResFile(ctx.getLocalTmpPath()); - desc.setResFile(ctx.getResFile().toString()); + resFile = ctx.getResFile().toString(); } + AlterResourcePlanDesc desc = new AlterResourcePlanDesc(resourcePlan, rpName, validate, isEnableActivate, false, + isReplace, resFile); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); if (validate) { - setFetchTask(createFetchTask(AlterResourcePlanDesc.getSchema())); + setFetchTask(createFetchTask(AlterResourcePlanDesc.SCHEMA)); } } @@ -1081,8 +1082,7 @@ private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException { } DropResourcePlanDesc desc = new DropResourcePlanDesc(rpName, ifExists); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateTrigger(ASTNode ast) throws SemanticException { @@ -1100,7 +1100,7 @@ private void analyzeCreateTrigger(ASTNode ast) throws SemanticException { CreateWMTriggerDesc desc = new CreateWMTriggerDesc(trigger); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private String buildTriggerExpression(ASTNode ast) throws SemanticException { @@ -1156,7 +1156,7 @@ private void analyzeAlterTrigger(ASTNode ast) throws SemanticException { AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeDropTrigger(ASTNode ast) throws SemanticException { @@ -1168,8 +1168,7 @@ private void analyzeDropTrigger(ASTNode ast) throws SemanticException { DropWMTriggerDesc desc = new DropWMTriggerDesc(rpName, triggerName); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreatePool(ASTNode ast) throws SemanticException { @@ -1210,10 +1209,9 @@ private void analyzeCreatePool(ASTNode ast) throws SemanticException { if (!pool.isSetQueryParallelism()) { throw new SemanticException("query_parallelism should be specified for a pool"); } - CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, false); + CreateWMPoolDesc desc = new CreateWMPoolDesc(pool); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeAlterPool(ASTNode ast) throws SemanticException { @@ -1244,9 +1242,13 @@ private void analyzeAlterPool(ASTNode ast) throws SemanticException { hasTrigger = true; boolean drop = child.getType() == HiveParser.TOK_DROP_TRIGGER; String triggerName = unescapeIdentifier(param.getText()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new 
CreateOrDropTriggerToPoolMappingDesc( - rpName, triggerName, poolPath, drop, isUnmanagedPool)))); + if (drop) { + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), + new AlterPoolDropTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); + } else { + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), + new AlterPoolAddTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)))); + } } else { if (isUnmanagedPool) { throw new SemanticException("Cannot alter the unmanaged pool"); @@ -1282,8 +1284,8 @@ private void analyzeAlterPool(ASTNode ast) throws SemanticException { if (!poolChanges.isSetPoolPath()) { poolChanges.setPoolPath(poolPath); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - new CreateOrAlterWMPoolDesc(poolChanges, poolPath, true)))); + AlterWMPoolDesc ddlDesc = new AlterWMPoolDesc(poolChanges, poolPath); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), ddlDesc))); } } @@ -1296,8 +1298,7 @@ private void analyzeDropPool(ASTNode ast) throws SemanticException { DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath); addServiceOutput(); - rootTasks.add(TaskFactory.get( - new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws SemanticException { @@ -1316,9 +1317,14 @@ private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws Sem mapping.setOrdering(Integer.valueOf(ast.getChild(4).getText())); } - CreateOrAlterWMMappingDesc desc = new CreateOrAlterWMMappingDesc(mapping, update); + org.apache.hadoop.hive.ql.ddl.DDLDesc desc = null; + if (update) { + desc = new AlterWMMappingDesc(mapping); + } else { + desc = new CreateWMMappingDesc(mapping); + } addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeDropMapping(ASTNode ast) throws SemanticException { @@ -1331,7 +1337,7 @@ private void analyzeDropMapping(ASTNode ast) throws SemanticException { DropWMMappingDesc desc = new DropWMMappingDesc(new WMMapping(rpName, entityType, entityName)); addServiceOutput(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); } private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { @@ -4195,51 +4201,6 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) alterTblDesc))); } - /** - * Analyze skewed column names - * - * @param skewedColNames - * @param child - * @return - * @throws SemanticException - */ - private List analyzeAlterTableSkewedColNames(List skewedColNames, - ASTNode child) throws SemanticException { - Tree nNode = child.getChild(0); - if (nNode == null) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - ASTNode nAstNode = (ASTNode) nNode; - if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - skewedColNames = getColumnNames(nAstNode); - } - } - return skewedColNames; - } - - /** - * Given a ASTNode, return list of values. 
- * - * use case: - * create table xyz list bucketed (col1) with skew (1,2,5) - * AST Node is for (1,2,5) - * - * @param ast - * @return - */ - private List getColumnValues(ASTNode ast) { - List colList = new ArrayList(); - int numCh = ast.getChildCount(); - for (int i = 0; i < numCh; i++) { - ASTNode child = (ASTNode) ast.getChild(i); - colList.add(stripQuotes(child.getText()).toLowerCase()); - } - return colList; - } - - /** * Analyze alter table's skewed location * diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java deleted file mode 100644 index 53f1f71399..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -@Explain(displayName = "Create/Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateOrAlterWMPoolDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 4872940135771213510L; - - private WMPool createPool; - private WMNullablePool alterPool; - private String poolPath; - private boolean update; - - public CreateOrAlterWMPoolDesc() {} - - public CreateOrAlterWMPoolDesc(WMPool pool, String poolPath, boolean update) { - this.createPool = pool; - this.poolPath = poolPath; - this.update = update; - } - - public CreateOrAlterWMPoolDesc(WMNullablePool pool, String poolPath, boolean update) { - this.alterPool = pool; - this.poolPath = poolPath; - this.update = update; - } - - @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public Object getPool() { - return createPool == null ? 
alterPool : createPool; - } - - public WMPool getCreatePool() { - return createPool; - } - - public WMNullablePool getAlterPool() { - return alterPool; - } - - public void setCreatePool(WMPool pool) { - this.createPool = pool; - } - - public void setAlterPool(WMNullablePool pool) { - this.alterPool = pool; - } - - @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getPoolPath() { - return poolPath; - } - - public void setPoolPath(String poolPath) { - this.poolPath = poolPath; - } - - @Explain(displayName="isUpdate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public boolean isUpdate() { - return update; - } - - public void setUpdate(boolean update) { - this.update = update; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index e6f3a6f917..1901defa27 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -52,23 +52,6 @@ private ShowConfDesc showConfDesc; - private CreateResourcePlanDesc createResourcePlanDesc; - private ShowResourcePlanDesc showResourcePlanDesc; - private DropResourcePlanDesc dropResourcePlanDesc; - private AlterResourcePlanDesc alterResourcePlanDesc; - - private CreateWMTriggerDesc createWMTriggerDesc; - private AlterWMTriggerDesc alterWMTriggerDesc; - private DropWMTriggerDesc dropWMTriggerDesc; - - private CreateOrAlterWMPoolDesc wmPoolDesc; - private DropWMPoolDesc dropWMPoolDesc; - - private CreateOrAlterWMMappingDesc wmMappingDesc; - private DropWMMappingDesc dropWMMappingDesc; - - private CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc; - private ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc; boolean needLock = false; @@ -241,78 +224,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.killQueryDesc = killQueryDesc; } - public DDLWork(HashSet inputs, HashSet outputs, - CreateResourcePlanDesc createResourcePlanDesc) { - this(inputs, outputs); - this.createResourcePlanDesc = createResourcePlanDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - ShowResourcePlanDesc showResourcePlanDesc) { - this(inputs, outputs); - this.showResourcePlanDesc = showResourcePlanDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - DropResourcePlanDesc dropResourcePlanDesc) { - this(inputs, outputs); - this.dropResourcePlanDesc = dropResourcePlanDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - AlterResourcePlanDesc alterResourcePlanDesc) { - this(inputs, outputs); - this.alterResourcePlanDesc = alterResourcePlanDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - CreateWMTriggerDesc createWMTriggerDesc) { - this(inputs, outputs); - this.createWMTriggerDesc = createWMTriggerDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - AlterWMTriggerDesc alterWMTriggerDesc) { - this(inputs, outputs); - this.alterWMTriggerDesc = alterWMTriggerDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - DropWMTriggerDesc dropWMTriggerDesc) { - this(inputs, outputs); - this.dropWMTriggerDesc = dropWMTriggerDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - CreateOrAlterWMPoolDesc wmPoolDesc) { - this(inputs, outputs); - this.wmPoolDesc = wmPoolDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - DropWMPoolDesc dropWMPoolDesc) { - this(inputs, outputs); - this.dropWMPoolDesc = dropWMPoolDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - 
CreateOrAlterWMMappingDesc wmMappingDesc) { - this(inputs, outputs); - this.wmMappingDesc = wmMappingDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - DropWMMappingDesc dropWMMappingDesc) { - this(inputs, outputs); - this.dropWMMappingDesc = dropWMMappingDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) { - this(inputs, outputs); - this.triggerToPoolMappingDesc = triggerToPoolMappingDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, ReplRemoveFirstIncLoadPendFlagDesc replSetFirstIncLoadFlagDesc) { this(inputs, outputs); @@ -461,60 +372,6 @@ public InsertCommitHookDesc getInsertCommitHookDesc() { return insertCommitHookDesc; } - @Explain(displayName = "Create resource plan") - public CreateResourcePlanDesc getCreateResourcePlanDesc() { - return createResourcePlanDesc; - } - - @Explain(displayName = "Show resource plan") - public ShowResourcePlanDesc getShowResourcePlanDesc() { - return showResourcePlanDesc; - } - - public DropResourcePlanDesc getDropResourcePlanDesc() { - return dropResourcePlanDesc; - } - - public AlterResourcePlanDesc getAlterResourcePlanDesc() { - return alterResourcePlanDesc; - } - - public CreateWMTriggerDesc getCreateWMTriggerDesc() { - return createWMTriggerDesc; - } - - public AlterWMTriggerDesc getAlterWMTriggerDesc() { - return alterWMTriggerDesc; - } - - public DropWMTriggerDesc getDropWMTriggerDesc() { - return dropWMTriggerDesc; - } - - public CreateOrAlterWMPoolDesc getWmPoolDesc() { - return wmPoolDesc; - } - - public DropWMPoolDesc getDropWMPoolDesc() { - return dropWMPoolDesc; - } - - public CreateOrAlterWMMappingDesc getWmMappingDesc() { - return wmMappingDesc; - } - - public DropWMMappingDesc getDropWMMappingDesc() { - return dropWMMappingDesc; - } - - public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() { - return triggerToPoolMappingDesc; - } - - public void setTriggerToPoolMappingDesc(CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) { - this.triggerToPoolMappingDesc = triggerToPoolMappingDesc; - } - public ReplRemoveFirstIncLoadPendFlagDesc getReplSetFirstIncLoadFlagDesc() { return replSetFirstIncLoadFlagDesc; } diff --git ql/src/test/queries/clientpositive/resourceplan.q ql/src/test/queries/clientpositive/resourceplan.q index 93d848ba33..8bc5697a6c 100644 --- ql/src/test/queries/clientpositive/resourceplan.q +++ ql/src/test/queries/clientpositive/resourceplan.q @@ -24,12 +24,16 @@ SELECT * FROM SYS.WM_RESOURCEPLANS; -- Create and show plan_1. CREATE RESOURCE PLAN plan_1; +EXPLAIN SHOW RESOURCE PLANS; SHOW RESOURCE PLANS; +EXPLAIN SHOW RESOURCE PLAN plan_1; SHOW RESOURCE PLAN plan_1; SELECT * FROM SYS.WM_RESOURCEPLANS; -- Create and show plan_2. +EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5; CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5; +EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10; ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10; SHOW RESOURCE PLANS; SHOW RESOURCE PLAN plan_2; @@ -65,11 +69,14 @@ SELECT * FROM SYS.WM_RESOURCEPLANS; -- Will fail for now; there are no pools. +EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1; ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1; SELECT * FROM SYS.WM_RESOURCEPLANS; -- Shouldn't be able to rename or modify an enabled plan. 
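The hunks above complete a pattern: the catch-all CreateOrAlterWMPoolDesc (deleted in full above, with its update flag and parallel WMPool/WMNullablePool fields) is split into one descriptor per command, and the dozen desc-specific DDLWork constructors and getters can go because DDLWork2 carries a single generic desc. The create-side replacement is not itself part of this excerpt; a plausible sketch, inferred from the new CreateWMPoolDesc(pool) call in the analyzer and the "Create Pool" explain header in the golden files below (package, interface, and annotation layout are assumptions):

// Hypothetical sketch of the create-only descriptor that replaces
// CreateOrAlterWMPoolDesc; inferred from this diff, not copied from
// CreateWMPoolDesc.java, which is outside this excerpt.
package org.apache.hadoop.hive.ql.ddl.workloadmanagement; // assumed package

import java.io.Serializable;

import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.ql.ddl.DDLDesc; // assumed to be the new marker interface
import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;

@Explain(displayName = "Create Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class CreateWMPoolDesc implements DDLDesc, Serializable {
  private static final long serialVersionUID = 1L;

  // The full thrift pool definition; no update flag and no WMNullablePool twin.
  private final WMPool pool;

  public CreateWMPoolDesc(WMPool pool) {
    this.pool = pool;
  }

  @Explain(displayName = "pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public WMPool getPool() {
    return pool;
  }
}

Splitting create from alter trades one flag-driven class for two small ones, which is what lets the alter path keep WMNullablePool (nullable fields meaning "unset") while the create path keeps the stricter WMPool.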
+EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE; ALTER RESOURCE PLAN plan_3 ENABLE; +EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4; ALTER RESOURCE PLAN plan_3 RENAME TO plan_4; ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30; ALTER RESOURCE PLAN plan_3 DISABLE; @@ -137,7 +144,8 @@ SELECT * FROM SYS.WM_RESOURCEPLANS; -- Drop resource plan. -- -- Fail, active plan. +EXPLAIN DROP RESOURCE PLAN plan_2; DROP RESOURCE PLAN plan_2; -- Success. @@ -166,6 +174,7 @@ drop table wm_test; CREATE RESOURCE PLAN plan_1; +EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL; CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL; SELECT * FROM SYS.WM_TRIGGERS; @@ -189,9 +198,11 @@ CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL; CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool; SELECT * FROM SYS.WM_TRIGGERS; +EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL; ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL; SELECT * FROM SYS.WM_TRIGGERS; +EXPLAIN DROP TRIGGER plan_1.trigger_1; DROP TRIGGER plan_1.trigger_1; SELECT * FROM SYS.WM_TRIGGERS; @@ -231,6 +242,8 @@ SELECT * FROM SYS.WM_TRIGGERS; -- -- Cannot create pool in active plans. +EXPLAIN CREATE POOL plan_1.default WITH + ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'; CREATE POOL plan_1.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'; @@ -252,6 +265,7 @@ CREATE POOL plan_2.default.c2 WITH ALTER RESOURCE PLAN plan_2 VALIDATE; ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE; +EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1; ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1; ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair'; SELECT * FROM SYS.WM_POOLS; @@ -267,6 +281,7 @@ ALTER RESOURCE PLAN plan_2 DISABLE; ALTER POOL plan_2.default SET path = def; SELECT * FROM SYS.WM_POOLS; +EXPLAIN DROP POOL plan_2.default; DROP POOL plan_2.default; SELECT * FROM SYS.WM_POOLS; @@ -314,6 +329,7 @@ SELECT * FROM SYS.WM_RESOURCEPLANS; -- -- Success. +EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1; ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1; ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1; @@ -346,6 +362,7 @@ ALTER POOL plan_2.def ADD TRIGGER trigger_2; SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS; -- Drop success. +EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1; ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1; -- Drop fail, does not exist. @@ -362,12 +379,15 @@ SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS; -- User and group mappings. -- +EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def; CREATE USER MAPPING "user1" IN plan_2 TO def; CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1; CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1; CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1; CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1; +EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1; CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1; +EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED; ALTER USER MAPPING "user1" IN plan_2 UNMANAGED; SHOW RESOURCE PLAN plan_2; @@ -377,7 +397,9 @@ SELECT * FROM SYS.WM_MAPPINGS; -- Drop pool failed, pool in use.
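Every EXPLAIN added to this q file exists to pin the @Explain rendering of the new desc classes in the golden files below. How such a plan reaches an operation is only implied by this diff; a minimal sketch of the generic dispatch that makes the deleted desc-specific DDLWork plumbing unnecessary, where the registry, the getDDLDesc() accessor, and the constructor convention are all assumptions:

// Illustrative dispatch only: one operation class per desc class, resolved
// from the desc's concrete type. Nothing in this diff shows the real
// DDLTask2 internals; it only shows DDLWork2(inputs, outputs, desc).
import java.util.Map;

import org.apache.hadoop.hive.ql.ddl.DDLDesc;             // appears in this diff
import org.apache.hadoop.hive.ql.ddl.DDLOperation;        // assumed base class
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; // assumed
import org.apache.hadoop.hive.ql.ddl.DDLWork2;            // appears in this diff

public final class DdlDispatchSketch {
  // e.g. CreateWMPoolDesc.class -> CreateWMPoolOperation.class, built at startup
  private final Map<Class<?>, Class<? extends DDLOperation>> operations;

  public DdlDispatchSketch(Map<Class<?>, Class<? extends DDLOperation>> operations) {
    this.operations = operations;
  }

  public int execute(DDLWork2 work, DDLOperationContext context) throws Exception {
    DDLDesc desc = work.getDDLDesc(); // hypothetical accessor name
    return operations.get(desc.getClass())
        .getConstructor(DDLOperationContext.class, desc.getClass())
        .newInstance(context, desc)
        .execute();
  }
}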
DROP POOL plan_2.def.c1; +EXPLAIN DROP USER MAPPING "user2" in plan_2; DROP USER MAPPING "user2" in plan_2; +EXPLAIN DROP GROUP MAPPING "group2" in plan_2; DROP GROUP MAPPING "group2" in plan_2; DROP GROUP MAPPING "group3" in plan_2; DROP APPLICATION MAPPING "app1" in plan_2; diff --git ql/src/test/results/clientpositive/llap/resourceplan.q.out ql/src/test/results/clientpositive/llap/resourceplan.q.out index c0d6ec2905..69ba2171fb 100644 --- ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -3585,12 +3585,51 @@ PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE RESOURCE PLAN plan_1 POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: EXPLAIN SHOW RESOURCE PLANS +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN SHOW RESOURCE PLANS +POSTHOOK: type: SHOW RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Resource plans + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW RESOURCE PLANS PREHOOK: type: SHOW RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: SHOW RESOURCE PLANS POSTHOOK: type: SHOW RESOURCEPLAN plan_1 DISABLED +PREHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 +POSTHOOK: type: SHOW RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Resource plans + resourcePlanName: plan_1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW RESOURCE PLAN plan_1 PREHOOK: type: SHOW RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3608,11 +3647,40 @@ POSTHOOK: type: QUERY POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### plan_1 default DISABLED NULL default +PREHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +POSTHOOK: type: CREATE RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create ResourcePlan + planName: plan_2 + queryParallelism: 5 + PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan to modify: plan_2 + Resource plan changed fields: + shouldValidate: false + PREHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3646,7 +3714,7 @@ plan_2 default DISABLED 10 default PREHOOK: query: CREATE RESOURCE PLAN plan_2 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return 
code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Resource plan plan_2 already exists +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Resource plan plan_2 already exists PREHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3656,7 +3724,7 @@ FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_qu PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: ) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Resource plan name should be unique: ) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3712,10 +3780,25 @@ POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### plan_2 default DISABLED 10 default plan_3 default DISABLED NULL default +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan to modify: plan_3 + Resource plan changed fields: + shouldValidate: false + PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find pool: default1) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
NoSuchObjectException(message:Cannot find pool: default1) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3726,19 +3809,49 @@ POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### plan_2 default DISABLED 10 default plan_3 default DISABLED NULL default +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan to modify: plan_3 + Resource plan changed fields: + shouldValidate: false + PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan to modify: plan_3 + Resource plan changed fields: + shouldValidate: false + PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3757,7 +3870,7 @@ plan_3 default DISABLED NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command)) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command)) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3831,7 +3944,7 @@ plan_3 default ACTIVE NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3845,7 +3958,7 @@ plan_3 default ACTIVE NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3951,10 +4064,23 @@ POSTHOOK: Input: sys@wm_resourceplans #### A masked pattern was here #### plan_2 default ACTIVE 10 default plan_3 default DISABLED NULL default +PREHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2 +PREHOOK: type: DROP RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2 +POSTHOOK: type: DROP RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop Resource plans + resourcePlanName: plan_2 + PREHOOK: query: DROP RESOURCE PLAN plan_2 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot drop an active resource plan) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot drop an active resource plan) PREHOOK: query: DROP RESOURCE PLAN plan_3 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -3972,7 +4098,7 @@ plan_2 default ACTIVE 10 default PREHOOK: query: DROP RESOURCE PLAN plan_99999 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Resource plan plan_99999 does not exist +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
Resource plan plan_99999 does not exist PREHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -4035,6 +4161,19 @@ PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE RESOURCE PLAN plan_1 POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL +PREHOOK: type: CREATE TRIGGER +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL +POSTHOOK: type: CREATE TRIGGER +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create WM Trigger + trigger: + PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -4052,7 +4191,7 @@ plan_1 default trigger_1 BYTES_READ > '10kb' KILL PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: ) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Trigger already exists, use alter: ) FAILED: ParseException line 4:60 mismatched input 'AND' expecting DO near ''30sec'' in create trigger statement FAILED: ParseException line 2:63 mismatched input 'OR' expecting DO near ''30second'' in create trigger statement FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator @@ -4062,15 +4201,15 @@ FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_ PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '10k' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.IllegalArgumentException: Invalid size unit k +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Invalid size unit k PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '10 millis' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Invalid time unit millis PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value. 
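One pattern accounts for most of the golden-file churn here and below: in each failing statement the only change is the task class inside "FAILED: Execution Error, return code 1 from ...", which flips from org.apache.hadoop.hive.ql.exec.DDLTask to org.apache.hadoop.hive.ql.ddl.DDLTask2. The driver interpolates the failing task's class name into that line, so merely re-homing a command onto the new task rewrites every negative test. A sketch of the provenance (the real formatting lives in Hive's Driver; the names here are illustrative):

// Why every FAILED line changes: the failing Task subclass's name is part of
// the message, so moving WM DDL from DDLTask to DDLTask2 touches each one.
public final class TaskFailureMessageSketch {
  static String format(Object failingTask, int returnCode, String cause) {
    return "FAILED: Execution Error, return code " + returnCode
        + " from " + failingTask.getClass().getName() + ". " + cause;
  }
}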
PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -4086,6 +4225,19 @@ POSTHOOK: Input: sys@wm_triggers #### A masked pattern was here #### plan_1 default trigger_1 BYTES_READ > '10kb' KILL plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool +PREHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL +PREHOOK: type: ALTER TRIGGER +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL +POSTHOOK: type: ALTER TRIGGER +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter WM Trigger + trigger: + PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL PREHOOK: type: ALTER TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -4101,6 +4253,20 @@ POSTHOOK: Input: sys@wm_triggers #### A masked pattern was here #### plan_1 default trigger_1 BYTES_READ > '1GB' KILL plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool +PREHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1 +PREHOOK: type: DROP TRIGGER +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1 +POSTHOOK: type: DROP TRIGGER +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop WM Trigger + resourcePlanName: plan_1 + triggerName: trigger_1 + PREHOOK: query: DROP TRIGGER plan_1.trigger_1 PREHOOK: type: DROP TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -4118,7 +4284,7 @@ plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > '100mb' DO MOVE TO null_pool PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default` PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -4195,11 +4361,11 @@ table default DISABLED 1 default PREHOOK: query: DROP TRIGGER plan_1.trigger_2 PREHOOK: type: DROP TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000gb" DO KILL PREHOOK: type: ALTER TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) 
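Note that the new "Create WM Trigger" and "Alter WM Trigger" stages print an empty trigger: value, presumably because the desc exposes the raw thrift WMTrigger, for which the explain writer has no annotated fields to render. The execution side of these trigger commands is outside this excerpt; under the new framework it plausibly reduces to the same metastore call the old DDLTask made. A sketch, where the class name, the DDLOperation base-class contract, and context.getDb() are assumptions (Hive.dropWMTrigger itself predates this patch):

// Hypothetical operation for DROP TRIGGER under the new framework; only the
// analyzer side (DropWMTriggerDesc handed to DDLWork2) appears in this diff.
// Getter names are inferred from the "resourcePlanName"/"triggerName" explain keys.
import org.apache.hadoop.hive.ql.ddl.DDLOperation;        // assumed base class
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; // assumed
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class DropWMTriggerOperation extends DDLOperation {
  private final DropWMTriggerDesc desc;

  public DropWMTriggerOperation(DDLOperationContext context, DropWMTriggerDesc desc) {
    super(context); // assumes the base class stores the context in a protected field
    this.desc = desc;
  }

  @Override
  public int execute() throws HiveException {
    // Same metastore call the pre-refactor DDLTask made; only the packaging changed.
    // context.getDb() returning the Hive metadata client is an assumption.
    context.getDb().dropWMTrigger(desc.getResourcePlanName(), desc.getTriggerName());
    return 0;
  }
}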
PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -4219,11 +4385,11 @@ table default DISABLED 1 default PREHOOK: query: DROP TRIGGER plan_1.trigger_2 PREHOOK: type: DROP TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000KB" DO KILL PREHOOK: type: ALTER TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -4248,17 +4414,32 @@ table default table BYTES_WRITTEN > '100KB' MOVE TO default table default trigger BYTES_WRITTEN > '100MB' MOVE TO default table default trigger1 ELAPSED_TIME > 10 KILL table default trigger2 ELAPSED_TIME > '1hour' KILL +PREHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH + ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' +PREHOOK: type: CREATE POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH + ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' +POSTHOOK: type: CREATE POOL +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create Pool + pool: + PREHOOK: query: CREATE POOL plan_1.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.) FAILED: SemanticException alloc_fraction should be specified for a pool FAILED: SemanticException query_parallelism should be specified for a pool PREHOOK: query: CREATE POOL plan_2.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5 PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Pool already exists: ) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Pool already exists: ) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -4294,7 +4475,21 @@ Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default]) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default]) +PREHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 +PREHOOK: type: ALTER POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 +POSTHOOK: type: ALTER POOL +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Pool + pool: + poolPath: default.c2 + PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest @@ -4375,10 +4570,23 @@ plan_2 default def 1.0 5 NULL plan_2 default def.c1 0.3 3 fair plan_2 default def.c2 0.7 1 NULL table default default 1.0 4 NULL +PREHOOK: query: EXPLAIN DROP POOL plan_2.default +PREHOOK: type: DROP POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DROP POOL plan_2.default +POSTHOOK: type: DROP POOL +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop WM Pool + poolName: plan_2 + PREHOOK: query: DROP POOL plan_2.default PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot delete pool: default) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot delete pool: default) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -4396,7 +4604,7 @@ PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8 PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Pool path is invalid, the parent does not exist) PREHOOK: query: CREATE POOL `table`.`table` WITH SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1 PREHOOK: type: CREATE POOL @@ -4472,7 +4680,7 @@ table default table.pool.child2 0.7 3 fair PREHOOK: query: DROP POOL `table`.`table` PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Cannot drop a pool that has child pools) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -4493,7 +4701,7 @@ table default table.pool.child2 0.7 3 fair PREHOOK: query: DROP POOL `table`.default PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
InvalidOperationException(message:Cannot drop default pool of a resource plan) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Cannot drop default pool of a resource plan) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -4564,6 +4772,21 @@ POSTHOOK: Input: sys@wm_resourceplans plan_1 default ACTIVE NULL default plan_2 default DISABLED 10 def table default DISABLED 1 NULL +PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 +PREHOOK: type: ALTER POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 +POSTHOOK: type: ALTER POOL +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create Trigger to pool mappings + resourcePlanName: plan_2 + Pool path: def.c1 + Trigger name: trigger_1 + PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest @@ -4666,11 +4889,11 @@ table default table.pool.child2 trigger2 PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find pool: default) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find pool: default) PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools_to_triggers @@ -4685,6 +4908,21 @@ table default table table table default table.pool.child1 table table default table.pool.child1 trigger1 table default table.pool.child2 trigger2 +PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 +PREHOOK: type: ALTER POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 +POSTHOOK: type: ALTER POOL +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop Trigger to pool mappings + resourcePlanName: plan_2 + Pool path: def.c1 + Trigger name: trigger_1 + PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest @@ -4693,7 +4931,7 @@ POSTHOOK: type: ALTER POOL PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
NoSuchObjectException(message:Cannot find trigger with name: trigger_2) PREHOOK: query: DROP POOL `table`.`table`.pool.child1 PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest @@ -4714,6 +4952,19 @@ POSTHOOK: Input: sys@wm_pools_to_triggers #### A masked pattern was here #### plan_2 default def.c2 trigger_1 table default table table +PREHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def +PREHOOK: type: CREATE MAPPING +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def +POSTHOOK: type: CREATE MAPPING +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create Mapping + mapping: + PREHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def PREHOOK: type: CREATE MAPPING PREHOOK: Output: dummyHostnameForTest @@ -4739,11 +4990,37 @@ PREHOOK: type: CREATE MAPPING PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1 POSTHOOK: type: CREATE MAPPING +PREHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 +PREHOOK: type: CREATE MAPPING +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 +POSTHOOK: type: CREATE MAPPING +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create Mapping + mapping: + PREHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 PREHOOK: type: CREATE MAPPING PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1 POSTHOOK: type: CREATE MAPPING +PREHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED +PREHOOK: type: ALTER MAPPING +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED +POSTHOOK: type: ALTER MAPPING +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Mapping + mapping: + PREHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED PREHOOK: type: ALTER MAPPING PREHOOK: Output: dummyHostnameForTest @@ -4784,12 +5061,38 @@ plan_2 default USER user2 def 1 PREHOOK: query: DROP POOL plan_2.def.c1 PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Please remove all mappings for this pool.) 
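The add/drop split visible above (distinct "Create Trigger to pool mappings" and "Drop Trigger to pool mappings" stages) is the golden-file face of replacing CreateOrDropTriggerToPoolMappingDesc and its boolean drop flag with two descriptors chosen at analysis time. Inferred from the four-argument constructor calls in the analyzer hunk and the explain keys above, the add-side descriptor plausibly looks as follows; names, package, and layout are assumptions, not the literal source:

// Hypothetical sketch of AlterPoolAddTriggerDesc; field order matches the
// new AlterPoolAddTriggerDesc(rpName, triggerName, poolPath, isUnmanagedPool)
// call in the analyzer, and the @Explain keys match the q.out output above.
package org.apache.hadoop.hive.ql.ddl.workloadmanagement; // assumed package

import java.io.Serializable;

import org.apache.hadoop.hive.ql.ddl.DDLDesc; // assumed marker interface
import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;

@Explain(displayName = "Create Trigger to pool mappings",
    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class AlterPoolAddTriggerDesc implements DDLDesc, Serializable {
  private static final long serialVersionUID = 1L;

  private final String planName;
  private final String triggerName;
  private final String poolPath;
  private final boolean unmanagedPool; // trigger applies to the unmanaged queue, not a pool

  public AlterPoolAddTriggerDesc(String planName, String triggerName, String poolPath,
      boolean unmanagedPool) {
    this.planName = planName;
    this.triggerName = triggerName;
    this.poolPath = poolPath;
    this.unmanagedPool = unmanagedPool;
  }

  @Explain(displayName = "resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getPlanName() {
    return planName;
  }

  @Explain(displayName = "Pool path", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getPoolPath() {
    return poolPath;
  }

  @Explain(displayName = "Trigger name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getTriggerName() {
    return triggerName;
  }

  public boolean isUnmanagedPool() {
    return unmanagedPool;
  }
}

A matching AlterPoolDropTriggerDesc would differ only in its display name; that duplication is what the old boolean flag avoided, and the tradeoff buys one operation class per statement kind instead of a branch inside execute().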
+PREHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2 +PREHOOK: type: DROP MAPPING +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2 +POSTHOOK: type: DROP MAPPING +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop mapping + mapping: + PREHOOK: query: DROP USER MAPPING "user2" in plan_2 PREHOOK: type: DROP MAPPING PREHOOK: Output: dummyHostnameForTest POSTHOOK: query: DROP USER MAPPING "user2" in plan_2 POSTHOOK: type: DROP MAPPING +PREHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2 +PREHOOK: type: DROP MAPPING +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2 +POSTHOOK: type: DROP MAPPING +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop mapping + mapping: + PREHOOK: query: DROP GROUP MAPPING "group2" in plan_2 PREHOOK: type: DROP MAPPING PREHOOK: Output: dummyHostnameForTest diff --git ql/src/test/results/clientpositive/resourceplan.q.out ql/src/test/results/clientpositive/resourceplan.q.out new file mode 100644 index 0000000000..69ba2171fb --- /dev/null +++ ql/src/test/results/clientpositive/resourceplan.q.out @@ -0,0 +1,5440 @@ +PREHOOK: query: show grant user hive_test_user +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user +POSTHOOK: type: SHOW_GRANT +default alltypesorc hive_test_user USER DELETE true -1 hive_test_user +default alltypesorc hive_test_user USER INSERT true -1 hive_test_user +default alltypesorc hive_test_user USER SELECT true -1 hive_test_user +default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user +default alltypesparquet hive_test_user USER DELETE true -1 hive_test_user +default alltypesparquet hive_test_user USER INSERT true -1 hive_test_user +default alltypesparquet hive_test_user USER SELECT true -1 hive_test_user +default alltypesparquet hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t1 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t1 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t1 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t1 hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t2 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t2 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t2 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t2 hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t3 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t3 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t3 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t3 hive_test_user USER UPDATE true -1 hive_test_user +default lineitem hive_test_user USER DELETE true -1 hive_test_user +default lineitem hive_test_user USER INSERT true -1 hive_test_user +default lineitem hive_test_user USER SELECT true -1 hive_test_user +default lineitem hive_test_user USER UPDATE true -1 hive_test_user +default part hive_test_user USER DELETE true -1 hive_test_user +default part hive_test_user USER INSERT true -1 hive_test_user +default part hive_test_user USER SELECT true -1 hive_test_user +default part hive_test_user USER UPDATE true -1 hive_test_user +default src hive_test_user USER DELETE true -1 hive_test_user +default src hive_test_user USER INSERT true -1 hive_test_user +default src hive_test_user USER SELECT true -1 hive_test_user +default src hive_test_user USER UPDATE true -1 hive_test_user 
+default src1 hive_test_user USER DELETE true -1 hive_test_user +default src1 hive_test_user USER INSERT true -1 hive_test_user +default src1 hive_test_user USER SELECT true -1 hive_test_user +default src1 hive_test_user USER UPDATE true -1 hive_test_user +default src_cbo hive_test_user USER DELETE true -1 hive_test_user +default src_cbo hive_test_user USER INSERT true -1 hive_test_user +default src_cbo hive_test_user USER SELECT true -1 hive_test_user +default src_cbo hive_test_user USER UPDATE true -1 hive_test_user +default src_json hive_test_user USER DELETE true -1 hive_test_user +default src_json hive_test_user USER INSERT true -1 hive_test_user +default src_json hive_test_user USER SELECT true -1 hive_test_user +default src_json hive_test_user USER UPDATE true -1 hive_test_user +default src_sequencefile hive_test_user USER DELETE true -1 hive_test_user +default src_sequencefile hive_test_user USER INSERT true -1 hive_test_user +default src_sequencefile hive_test_user USER SELECT true -1 hive_test_user +default src_sequencefile hive_test_user USER UPDATE true -1 hive_test_user +default src_thrift hive_test_user USER DELETE true -1 hive_test_user +default src_thrift hive_test_user USER INSERT true -1 hive_test_user +default src_thrift hive_test_user USER SELECT true -1 hive_test_user +default src_thrift hive_test_user USER UPDATE true -1 hive_test_user +default srcbucket hive_test_user USER DELETE true -1 hive_test_user +default srcbucket hive_test_user USER INSERT true -1 hive_test_user +default srcbucket hive_test_user USER SELECT true -1 hive_test_user +default srcbucket hive_test_user USER UPDATE true -1 hive_test_user +default srcbucket2 hive_test_user USER DELETE true -1 hive_test_user +default srcbucket2 hive_test_user USER INSERT true -1 hive_test_user +default srcbucket2 hive_test_user USER SELECT true -1 hive_test_user +default srcbucket2 hive_test_user USER UPDATE true -1 hive_test_user +default srcpart hive_test_user USER DELETE true -1 hive_test_user +default srcpart hive_test_user USER INSERT true -1 hive_test_user +default srcpart hive_test_user USER SELECT true -1 hive_test_user +default srcpart hive_test_user USER UPDATE true -1 hive_test_user +PREHOOK: query: CREATE DATABASE IF NOT EXISTS SYS +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:SYS +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS SYS +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:SYS +PREHOOK: query: USE SYS +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:sys +POSTHOOK: query: USE SYS +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@BUCKETING_COLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + 
\"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@BUCKETING_COLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@CDS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@CDS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@COLUMNS_V2 +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@COLUMNS_V2 +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DATABASE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DATABASE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"DBS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DBS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"DBS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DBS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\", + \"AUTHORIZER\" +FROM + \"DB_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DB_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\", + \"AUTHORIZER\" +FROM + \"DB_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DB_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `USER_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\", + \"AUTHORIZER\" +FROM + \"GLOBAL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@GLOBAL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` 
string, + `USER_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\", + \"AUTHORIZER\" +FROM + \"GLOBAL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@GLOBAL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITIONS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITIONS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_KEYS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_KEYS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + 
\"PARTITION_KEY_VALS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_KEY_VALS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEY_VALS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_KEY_VALS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_COL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_COL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_COL_PRIVS\"" +) 
+POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_COL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + \"ROLE_NAME\" +FROM + \"ROLES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@ROLES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + \"ROLE_NAME\" +FROM + \"ROLES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@ROLES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@ROLE_MAP 
+PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@ROLE_MAP +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `IS_STOREDASSUBDIRECTORIES` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + \"IS_STOREDASSUBDIRECTORIES\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SDS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `IS_STOREDASSUBDIRECTORIES` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + \"IS_STOREDASSUBDIRECTORIES\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SDS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SD_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SD_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE +) +STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SEQUENCE_TABLE +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SEQUENCE_TABLE +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SERDES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SERDES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SERDE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SERDE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_COL_NAMES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + 
`INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_COL_NAMES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, + CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, + CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_STRING_LIST +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_STRING_LIST +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_VALUES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_VALUES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SORT_COLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SORT_COLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"TABLE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TABLE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM 
+ \"TABLE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TABLE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM \"TBLS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM \"TBLS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint, + `DB_NAME` string, + `TBL_NAME` string, + `TXN_LIST` string, + CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY (`MV_CREATION_METADATA_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"DB_NAME\", + \"TBL_NAME\", + \"TXN_LIST\" +FROM \"MV_CREATION_METADATA\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@MV_CREATION_METADATA +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint, + `DB_NAME` string, + `TBL_NAME` string, + `TXN_LIST` string, + CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY (`MV_CREATION_METADATA_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"DB_NAME\", + \"TBL_NAME\", + \"TXN_LIST\" +FROM \"MV_CREATION_METADATA\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@MV_CREATION_METADATA +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint, + `TBL_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"TBL_ID\" +FROM \"MV_TABLES_USED\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@MV_TABLES_USED 
+PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint, + `TBL_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"TBL_ID\" +FROM \"MV_TABLES_USED\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@MV_TABLES_USED +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_COL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBL_COL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_COL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBL_COL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' 
+TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"TAB_COL_STATS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TAB_COL_STATS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"TAB_COL_STATS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TAB_COL_STATS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT 
`SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"PART_COL_STATS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_COL_STATS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"PART_COL_STATS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_COL_STATS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`, + 'Hive release version 4.0.0' AS `VERSION_COMMENT` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: SYS@VERSION +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`, + 'Hive release version 4.0.0' AS `VERSION_COMMENT` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: SYS@VERSION +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: VERSION.schema_version SIMPLE [] +POSTHOOK: Lineage: VERSION.ver_id SIMPLE [] +POSTHOOK: Lineage: VERSION.version_comment SIMPLE [] +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DB_VERSION +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED 
BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DB_VERSION +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + \"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@FUNCS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + \"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@FUNCS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + `DEFAULT_VALUE` string, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + \"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\", + \"DEFAULT_VALUE\" +FROM + \"KEY_CONSTRAINTS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@KEY_CONSTRAINTS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + `DEFAULT_VALUE` string, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + 
\"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\", + \"DEFAULT_VALUE\" +FROM + \"KEY_CONSTRAINTS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@KEY_CONSTRAINTS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE OR REPLACE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `TABLE_PARAMS` GROUP BY `TBL_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@table_params +PREHOOK: Output: SYS@TABLE_STATS_VIEW +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE OR REPLACE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `TABLE_PARAMS` GROUP BY `TBL_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@table_params +POSTHOOK: Output: SYS@TABLE_STATS_VIEW +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: TABLE_STATS_VIEW.column_stats_accurate EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.num_files EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.num_rows EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.raw_data_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.tbl_id SIMPLE [(table_params)table_params.FieldSchema(name:tbl_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.total_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.transient_last_ddl_time EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE OR REPLACE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS 
COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `PARTITION_PARAMS` GROUP BY `PART_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@partition_params +PREHOOK: Output: SYS@PARTITION_STATS_VIEW +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE OR REPLACE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `PARTITION_PARAMS` GROUP BY `PART_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@partition_params +POSTHOOK: Output: SYS@PARTITION_STATS_VIEW +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: PARTITION_STATS_VIEW.column_stats_accurate EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_files EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_rows EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.part_id SIMPLE [(partition_params)partition_params.FieldSchema(name:part_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.raw_data_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.total_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.transient_last_ddl_time EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `NS` string, + `STATUS` string, + `QUERY_PARALLELISM` int, + `DEFAULT_POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' 
else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"STATUS\", + \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"PATH\" +FROM + \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_RESOURCEPLANS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `NS` string, + `STATUS` string, + `QUERY_PARALLELISM` int, + `DEFAULT_POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"STATUS\", + \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"PATH\" +FROM + \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_RESOURCEPLANS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `NAME` string, + `TRIGGER_EXPRESSION` string, + `ACTION_EXPRESSION` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + r.\"NAME\" AS RP_NAME, + case when r.\"NS\" is null then 'default' else r.\"NS\" end, + t.\"NAME\" AS NAME, + \"TRIGGER_EXPRESSION\", + \"ACTION_EXPRESSION\" +FROM + \"WM_TRIGGER\" t +JOIN + \"WM_RESOURCEPLAN\" r +ON + t.\"RP_ID\" = r.\"RP_ID\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_TRIGGERS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `NAME` string, + `TRIGGER_EXPRESSION` string, + `ACTION_EXPRESSION` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + r.\"NAME\" AS RP_NAME, + case when r.\"NS\" is null then 'default' else r.\"NS\" end, + t.\"NAME\" AS NAME, + \"TRIGGER_EXPRESSION\", + \"ACTION_EXPRESSION\" +FROM + \"WM_TRIGGER\" t +JOIN + \"WM_RESOURCEPLAN\" r +ON + t.\"RP_ID\" = r.\"RP_ID\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_TRIGGERS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `NS` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\", + \"WM_POOL\".\"ALLOC_FRACTION\", + \"WM_POOL\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"SCHEDULING_POLICY\" +FROM + \"WM_POOL\" +JOIN + \"WM_RESOURCEPLAN\" +ON + \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_POOLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `NS` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\", + \"WM_POOL\".\"ALLOC_FRACTION\", + \"WM_POOL\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"SCHEDULING_POLICY\" +FROM + \"WM_POOL\" +JOIN + \"WM_RESOURCEPLAN\" +ON + \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_POOLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `POOL_PATH` string, + `TRIGGER_NAME` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\" AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_POOL_TO_TRIGGER\" + JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\" + JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +UNION +SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + '' AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_TRIGGER\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't') +" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `POOL_PATH` string, + `TRIGGER_NAME` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\" AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_POOL_TO_TRIGGER\" + JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\" + JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +UNION +SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + '' AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_TRIGGER\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't') +" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `NS` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' 
+TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"ENTITY_TYPE\", + \"ENTITY_NAME\", + case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end, + \"ORDERING\" +FROM \"WM_MAPPING\" +JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\" +" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_MAPPINGS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `NS` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"ENTITY_TYPE\", + \"ENTITY_NAME\", + case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end, + \"ORDERING\" +FROM \"WM_MAPPING\" +JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\" +" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_MAPPINGS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` ( + `CQ_ID` bigint, + `CQ_DATABASE` string, + `CQ_TABLE` string, + `CQ_PARTITION` string, + `CQ_STATE` string, + `CQ_TYPE` string, + `CQ_TBLPROPERTIES` string, + `CQ_WORKER_ID` string, + `CQ_START` bigint, + `CQ_RUN_AS` string, + `CQ_HIGHEST_WRITE_ID` bigint, + `CQ_HADOOP_JOB_ID` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPACTION_QUEUE\".\"CQ_ID\", + \"COMPACTION_QUEUE\".\"CQ_DATABASE\", + \"COMPACTION_QUEUE\".\"CQ_TABLE\", + \"COMPACTION_QUEUE\".\"CQ_PARTITION\", + \"COMPACTION_QUEUE\".\"CQ_STATE\", + \"COMPACTION_QUEUE\".\"CQ_TYPE\", + \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\", + \"COMPACTION_QUEUE\".\"CQ_START\", + \"COMPACTION_QUEUE\".\"CQ_RUN_AS\", + \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\", + \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\" +FROM \"COMPACTION_QUEUE\" +" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@COMPACTION_QUEUE +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` ( + `CQ_ID` bigint, + `CQ_DATABASE` string, + `CQ_TABLE` string, + `CQ_PARTITION` string, + `CQ_STATE` string, + `CQ_TYPE` string, + `CQ_TBLPROPERTIES` string, + `CQ_WORKER_ID` string, + `CQ_START` bigint, + `CQ_RUN_AS` string, + `CQ_HIGHEST_WRITE_ID` bigint, + `CQ_HADOOP_JOB_ID` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPACTION_QUEUE\".\"CQ_ID\", + \"COMPACTION_QUEUE\".\"CQ_DATABASE\", + \"COMPACTION_QUEUE\".\"CQ_TABLE\", + \"COMPACTION_QUEUE\".\"CQ_PARTITION\", + \"COMPACTION_QUEUE\".\"CQ_STATE\", + \"COMPACTION_QUEUE\".\"CQ_TYPE\", + \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\", + \"COMPACTION_QUEUE\".\"CQ_START\", + 
\"COMPACTION_QUEUE\".\"CQ_RUN_AS\", + \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\", + \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\" +FROM \"COMPACTION_QUEUE\" +" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@COMPACTION_QUEUE +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` ( + `CC_ID` bigint, + `CC_DATABASE` string, + `CC_TABLE` string, + `CC_PARTITION` string, + `CC_STATE` string, + `CC_TYPE` string, + `CC_TBLPROPERTIES` string, + `CC_WORKER_ID` string, + `CC_START` bigint, + `CC_END` bigint, + `CC_RUN_AS` string, + `CC_HIGHEST_WRITE_ID` bigint, + `CC_HADOOP_JOB_ID` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPLETED_COMPACTIONS\".\"CC_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\", + \"COMPLETED_COMPACTIONS\".\"CC_TABLE\", + \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\", + \"COMPLETED_COMPACTIONS\".\"CC_STATE\", + \"COMPLETED_COMPACTIONS\".\"CC_TYPE\", + \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_START\", + \"COMPLETED_COMPACTIONS\".\"CC_END\", + \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\", + \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\" +FROM \"COMPLETED_COMPACTIONS\" +" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@COMPLETED_COMPACTIONS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` ( + `CC_ID` bigint, + `CC_DATABASE` string, + `CC_TABLE` string, + `CC_PARTITION` string, + `CC_STATE` string, + `CC_TYPE` string, + `CC_TBLPROPERTIES` string, + `CC_WORKER_ID` string, + `CC_START` bigint, + `CC_END` bigint, + `CC_RUN_AS` string, + `CC_HIGHEST_WRITE_ID` bigint, + `CC_HADOOP_JOB_ID` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPLETED_COMPACTIONS\".\"CC_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\", + \"COMPLETED_COMPACTIONS\".\"CC_TABLE\", + \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\", + \"COMPLETED_COMPACTIONS\".\"CC_STATE\", + \"COMPLETED_COMPACTIONS\".\"CC_TYPE\", + \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_START\", + \"COMPLETED_COMPACTIONS\".\"CC_END\", + \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\", + \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\" +FROM \"COMPLETED_COMPACTIONS\" +" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@COMPLETED_COMPACTIONS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_HOSTNAME`, + `C_WORKER_ID`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_HIGHEST_WRITE_ID` +) AS +SELECT + CC_ID, + 'default', + CC_DATABASE, + CC_TABLE, + CC_PARTITION, + CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' WHEN CC_STATE = 'a' THEN 'attempted' ELSE 'UNKNOWN' END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[1] END, + 
CC_START, + CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END, + CC_HADOOP_JOB_ID, + CC_RUN_AS, + CC_HIGHEST_WRITE_ID +FROM COMPLETED_COMPACTIONS +UNION ALL +SELECT + CQ_ID, + 'default', + CQ_DATABASE, + CQ_TABLE, + CQ_PARTITION, + CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[1] END, + CQ_START, + cast (null as string), + CQ_HADOOP_JOB_ID, + CQ_RUN_AS, + CQ_HIGHEST_WRITE_ID +FROM COMPACTION_QUEUE +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@compaction_queue +PREHOOK: Input: sys@completed_compactions +PREHOOK: Output: SYS@COMPACTIONS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_HOSTNAME`, + `C_WORKER_ID`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_HIGHEST_WRITE_ID` +) AS +SELECT + CC_ID, + 'default', + CC_DATABASE, + CC_TABLE, + CC_PARTITION, + CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' WHEN CC_STATE = 'a' THEN 'attempted' ELSE 'UNKNOWN' END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[1] END, + CC_START, + CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END, + CC_HADOOP_JOB_ID, + CC_RUN_AS, + CC_HIGHEST_WRITE_ID +FROM COMPLETED_COMPACTIONS +UNION ALL +SELECT + CQ_ID, + 'default', + CQ_DATABASE, + CQ_TABLE, + CQ_PARTITION, + CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[1] END, + CQ_START, + cast (null as string), + CQ_HADOOP_JOB_ID, + CQ_RUN_AS, + CQ_HIGHEST_WRITE_ID +FROM COMPACTION_QUEUE +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@compaction_queue +POSTHOOK: Input: sys@completed_compactions +POSTHOOK: Output: SYS@COMPACTIONS +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: COMPACTIONS.c_catalog EXPRESSION [] +POSTHOOK: Lineage: COMPACTIONS.c_database EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_database, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_database, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_duration EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_end, type:bigint, comment:from deserializer), (completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), ] +#### A masked pattern was here #### +POSTHOOK: Lineage: COMPACTIONS.c_highest_write_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_highest_write_id, type:bigint, comment:from deserializer), 
(compaction_queue)compaction_queue.FieldSchema(name:cq_highest_write_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_hostname EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_partition EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_partition, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_partition, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_run_as EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_run_as, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_run_as, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_start EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_start, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_state EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_state, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_state, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_table EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_table, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_table, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_type EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_type, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_type, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_worker_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE DATABASE IF NOT EXISTS INFORMATION_SCHEMA +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:INFORMATION_SCHEMA +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS INFORMATION_SCHEMA +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:INFORMATION_SCHEMA +PREHOOK: query: USE INFORMATION_SCHEMA +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:information_schema +POSTHOOK: query: USE INFORMATION_SCHEMA +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:information_schema +PREHOOK: query: CREATE OR REPLACE VIEW `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT DISTINCT + 'default', + D.`NAME`, + D.`OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + 
LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND current_authorizer() = P.`AUTHORIZER` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@SCHEMATA +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT DISTINCT + 'default', + D.`NAME`, + D.`OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND current_authorizer() = P.`AUTHORIZER` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@SCHEMATA +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: SCHEMATA.catalog_name SIMPLE [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_catalog EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_name EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_schema EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.schema_name SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +#### A masked pattern was here #### +POSTHOOK: Lineage: SCHEMATA.sql_path SIMPLE [(dbs)d.FieldSchema(name:db_location_uri, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE OR REPLACE VIEW `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@TABLES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `TABLES` +( + `TABLE_CATALOG`, + 
`TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@TABLES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: TABLES.commit_action EXPRESSION [] +POSTHOOK: Lineage: TABLES.is_insertable_into EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.is_typed SIMPLE [] +POSTHOOK: Lineage: TABLES.reference_generation EXPRESSION [] +POSTHOOK: Lineage: TABLES.self_referencing_column_name EXPRESSION [] +POSTHOOK: Lineage: TABLES.table_catalog SIMPLE [] +POSTHOOK: Lineage: TABLES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.table_type EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.user_defined_type_catalog EXPRESSION [] +POSTHOOK: Lineage: TABLES.user_defined_type_name EXPRESSION [] +POSTHOOK: Lineage: TABLES.user_defined_type_schema EXPRESSION [] +PREHOOK: query: CREATE OR REPLACE VIEW `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR + (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer()) +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW 
`TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR + (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer()) +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: TABLE_PRIVILEGES.grantee SIMPLE [(tbl_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.grantor SIMPLE [(tbl_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.is_grantable EXPRESSION [(tbl_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.privilege_type SIMPLE [(tbl_privs)p.FieldSchema(name:tbl_priv, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_catalog SIMPLE [] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.with_hierarchy SIMPLE [] +PREHOOK: query: CREATE OR REPLACE VIEW `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 
1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) + JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR 
P.`TBL_ID` IS NOT NULL + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_COL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@columns_v2 +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@sds +PREHOOK: Input: sys@tbl_col_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@COLUMNS +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + 
ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) + JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_COL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@columns_v2 +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@sds +POSTHOOK: Input: sys@tbl_col_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@COLUMNS +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: COLUMNS.character_maximum_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.character_octet_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.character_set_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.character_set_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.character_set_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.collation_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.collation_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.collation_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.column_default EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.column_name SIMPLE [(columns_v2)c.FieldSchema(name:column_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from 
deserializer), ] +POSTHOOK: Lineage: COLUMNS.datetime_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.dtd_identifier SIMPLE [(columns_v2)c.FieldSchema(name:cd_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.generation_expression EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_cycle EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_generation EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_increment EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_maximum EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_minimum EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_start EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.interval_precision EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.interval_type EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.is_generated SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_identity SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_nullable SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_self_referencing SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_system_time_period_end SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_system_time_period_start SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_updatable SIMPLE [] +POSTHOOK: Lineage: COLUMNS.maximum_cardinality EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.numeric_precision_radix EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.ordinal_position SIMPLE [(columns_v2)c.FieldSchema(name:integer_idx, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.scope_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.scope_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.scope_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.system_time_period_timestamp_generation EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.table_catalog SIMPLE [] +POSTHOOK: Lineage: COLUMNS.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.udt_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.udt_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.udt_schema EXPRESSION [] +PREHOOK: query: CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + `sys`.`TBL_COL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) + LEFT 
JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL + AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer() +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@sds +PREHOOK: Input: sys@tbl_col_privs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + `sys`.`TBL_COL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL + AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer() +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@sds +POSTHOOK: Input: sys@tbl_col_privs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: COLUMN_PRIVILEGES.column_name SIMPLE [(tbl_col_privs)p.FieldSchema(name:column_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantee SIMPLE [(tbl_col_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantor SIMPLE [(tbl_col_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.is_grantable EXPRESSION [(tbl_col_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.privilege_type SIMPLE [(tbl_col_privs)p.FieldSchema(name:tbl_col_priv, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_catalog SIMPLE [] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE OR REPLACE VIEW `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT DISTINCT + 'default', + D.NAME, 
+ T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + length(T.VIEW_ORIGINAL_TEXT) > 0 + AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@VIEWS +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + length(T.VIEW_ORIGINAL_TEXT) > 0 + AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@VIEWS +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: VIEWS.check_option EXPRESSION [] +POSTHOOK: Lineage: VIEWS.is_insertable_into SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_deletable SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_insertable_into SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_updatable SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_updatable SIMPLE [] +POSTHOOK: Lineage: VIEWS.table_catalog SIMPLE [] +POSTHOOK: Lineage: VIEWS.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: VIEWS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: VIEWS.view_definition SIMPLE [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_HOSTNAME`, + `C_WORKER_ID`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_HIGHEST_WRITE_ID` +) AS +SELECT DISTINCT + C_ID, + C_CATALOG, + C_DATABASE, + C_TABLE, + C_PARTITION, + C_TYPE, + C_STATE, + C_HOSTNAME, + C_WORKER_ID, + C_START, + C_DURATION, + C_HADOOP_JOB_ID, + C_RUN_AS, + C_HIGHEST_WRITE_ID +FROM + `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR 
((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@compaction_queue +PREHOOK: Input: sys@compactions +PREHOOK: Input: sys@completed_compactions +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@COMPACTIONS +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_HOSTNAME`, + `C_WORKER_ID`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_HIGHEST_WRITE_ID` +) AS +SELECT DISTINCT + C_ID, + C_CATALOG, + C_DATABASE, + C_TABLE, + C_PARTITION, + C_TYPE, + C_STATE, + C_HOSTNAME, + C_WORKER_ID, + C_START, + C_DURATION, + C_HADOOP_JOB_ID, + C_RUN_AS, + C_HIGHEST_WRITE_ID +FROM + `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()) +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@compaction_queue +POSTHOOK: Input: sys@compactions +POSTHOOK: Input: sys@completed_compactions +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@COMPACTIONS +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: COMPACTIONS.c_catalog EXPRESSION [] +POSTHOOK: Lineage: COMPACTIONS.c_database EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_database, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_database, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_duration EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_end, type:bigint, comment:from deserializer), (completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), ] +#### A masked pattern was here #### +POSTHOOK: Lineage: COMPACTIONS.c_highest_write_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_highest_write_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_highest_write_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_hostname EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_id, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_partition EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_partition, type:string, comment:from deserializer), 
(compaction_queue)compaction_queue.FieldSchema(name:cq_partition, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_run_as EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_run_as, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_run_as, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_start EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_start, type:bigint, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_start, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_state EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_state, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_state, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_table EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_table, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_table, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_type EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_type, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_type, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COMPACTIONS.c_worker_id EXPRESSION [(completed_compactions)completed_compactions.FieldSchema(name:cc_worker_id, type:string, comment:from deserializer), (compaction_queue)compaction_queue.FieldSchema(name:cq_worker_id, type:string, comment:from deserializer), ] +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW RESOURCEPLAN +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +PREHOOK: query: CREATE RESOURCE PLAN plan_1 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: CREATE RESOURCE PLAN plan_1 +POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: EXPLAIN SHOW RESOURCE PLANS +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN SHOW RESOURCE PLANS +POSTHOOK: type: SHOW RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Resource plans + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW RESOURCEPLAN +plan_1 DISABLED +PREHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN SHOW RESOURCE PLAN plan_1 +POSTHOOK: type: SHOW RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Resource plans + resourcePlanName: plan_1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SHOW RESOURCE PLAN plan_1 +PREHOOK: 
type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: SHOW RESOURCE PLAN plan_1 +POSTHOOK: type: SHOW RESOURCEPLAN +plan_1[status=DISABLED,parallelism=null,defaultPool=default] + + default[allocFraction=1.0,schedulingPolicy=null,parallelism=4] + | mapped for default +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 default DISABLED NULL default +PREHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +POSTHOOK: type: CREATE RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Create ResourcePlan + planName: plan_2 + queryParallelism: 5 + +PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5 +POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +POSTHOOK: type: ALTER RESOURCEPLAN +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Resource plans + Resource plan to modify: plan_2 + Resource plan changed fields: + shouldValidate: false + +PREHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10 +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW RESOURCEPLAN +plan_1 DISABLED +plan_2 DISABLED 10 +PREHOOK: query: SHOW RESOURCE PLAN plan_2 +PREHOOK: type: SHOW RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: SHOW RESOURCE PLAN plan_2 +POSTHOOK: type: SHOW RESOURCEPLAN +plan_2[status=DISABLED,parallelism=10,defaultPool=default] + + default[allocFraction=1.0,schedulingPolicy=null,parallelism=5] + | mapped for default +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 default DISABLED NULL default +plan_2 default DISABLED 10 default +PREHOOK: query: CREATE RESOURCE PLAN plan_2 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
Resource plan plan_2 already exists
+PREHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2
+POSTHOOK: type: CREATE RESOURCEPLAN
+FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_query_parallelism 5) (tok_default_pool all))
+PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Resource plan name should be unique: )
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default DISABLED NULL default
+plan_2 default DISABLED 10 default
+PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 4
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 4
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED 4 default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 UNSET QUERY_PARALLELISM
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 UNSET QUERY_PARALLELISM
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
+POSTHOOK: type: ALTER RESOURCEPLAN
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Resource plans
+      Resource plan to modify: plan_3
+      Resource plan changed fields:
+      shouldValidate: false
+
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find pool: default1)
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Resource plans
+      Resource plan to modify: plan_3
+      Resource plan changed fields:
+      shouldValidate: false
+
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER RESOURCE PLAN plan_3 RENAME TO plan_4
+POSTHOOK: type: ALTER RESOURCEPLAN
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Resource plans
+      Resource plan to modify: plan_3
+      Resource plan changed fields:
+      shouldValidate: false
+
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command))
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ENABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: DISABLE WORKLOAD MANAGEMENT
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DISABLE WORKLOAD MANAGEMENT
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ENABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default DISABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ENABLED 10 default
+plan_3 default ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ACTIVE 10 default
+plan_3 default ENABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ACTIVE 10 default
+plan_3 default ENABLED NULL default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ACTIVE 10 default
+plan_3 default DISABLED NULL default
+PREHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN DROP RESOURCE PLAN plan_2
+POSTHOOK: type: DROP RESOURCEPLAN
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop Resource plans
+      resourcePlanName: plan_2
+
+PREHOOK: query: DROP RESOURCE PLAN plan_2
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot drop an active resource plan)
+PREHOOK: query: DROP RESOURCE PLAN plan_3
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP RESOURCE PLAN plan_3
+POSTHOOK: type: DROP RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ACTIVE 10 default
+PREHOOK: query: DROP RESOURCE PLAN plan_99999
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Resource plan plan_99999 does not exist
+PREHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999
+POSTHOOK: type: DROP RESOURCEPLAN
+PREHOOK: query: CREATE RESOURCE PLAN `table`
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN `table`
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 default ACTIVE 10 default
+table default DISABLED 1 default
+PREHOOK: query: create table wm_test(key string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: INFORMATION_SCHEMA@wm_test
+PREHOOK: Output: database:information_schema
+POSTHOOK: query: create table wm_test(key string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: INFORMATION_SCHEMA@wm_test
+POSTHOOK: Output: database:information_schema
+PREHOOK: query: select key as 30min from wm_test
+PREHOOK: type: QUERY
+PREHOOK: Input: information_schema@wm_test
+#### A masked pattern was here ####
+POSTHOOK: query: select key as 30min from wm_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: information_schema@wm_test
+#### A masked pattern was here ####
+PREHOOK: query: select "10kb" as str from wm_test
+PREHOOK: type: QUERY
+PREHOOK: Input: information_schema@wm_test
+#### A masked pattern was here ####
+POSTHOOK: query: select "10kb" as str from wm_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: information_schema@wm_test
+#### A masked pattern was here ####
+PREHOOK: query: drop table wm_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: information_schema@wm_test
+PREHOOK: Output: information_schema@wm_test
+POSTHOOK: query: drop table wm_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: information_schema@wm_test
+POSTHOOK: Output: information_schema@wm_test
+PREHOOK: query: CREATE RESOURCE PLAN plan_1
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN plan_1
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL
+POSTHOOK: type: CREATE TRIGGER
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create WM Trigger
+      trigger:
+
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '10kb' DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_1 BYTES_READ > '10kb' KILL
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Trigger already exists, use alter: )
+FAILED: ParseException line 4:60 mismatched input 'AND' expecting DO near ''30sec'' in create trigger statement
+FAILED: ParseException line 2:63 mismatched input 'OR' expecting DO near ''30second'' in create trigger statement
+FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '<' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '<=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '10k' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Invalid size unit k
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '10 millis' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Invalid time unit millis
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value.
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_1 BYTES_READ > '10kb' KILL
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+PREHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL
+PREHOOK: type: ALTER TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL
+POSTHOOK: type: ALTER TRIGGER
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter WM Trigger
+      trigger:
+
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL
+PREHOOK: type: ALTER TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > '1GB' DO KILL
+POSTHOOK: type: ALTER TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_1 BYTES_READ > '1GB' KILL
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+PREHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1
+PREHOOK: type: DROP TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN DROP TRIGGER plan_1.trigger_1
+POSTHOOK: type: DROP TRIGGER
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop WM Trigger
+      resourcePlanName: plan_1
+      triggerName: trigger_1
+
+PREHOOK: query: DROP TRIGGER plan_1.trigger_1
+PREHOOK: type: DROP TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP TRIGGER plan_1.trigger_1
+POSTHOOK: type: DROP TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > '100mb' DO MOVE TO null_pool
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default`
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > '100MB' DO MOVE TO `default`
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > '100MB' DO MOVE TO `default`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > "1GB" DO MOVE TO `default`
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > "1GB" DO MOVE TO `default`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > '1hour' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > '1hour' DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+table default database BYTES_WRITTEN > "1GB" MOVE TO default
+table default table BYTES_WRITTEN > '100KB' MOVE TO default
+table default trigger BYTES_WRITTEN > '100MB' MOVE TO default
+table default trigger1 ELAPSED_TIME > 10 KILL
+table default trigger2 ELAPSED_TIME > '1hour' KILL
+PREHOOK: query: DROP TRIGGER `table`.`database`
+PREHOOK: type: DROP TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP TRIGGER `table`.`database`
+POSTHOOK: type: DROP TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+table default table BYTES_WRITTEN > '100KB' MOVE TO default
+table default trigger BYTES_WRITTEN > '100MB' MOVE TO default
+table default trigger1 ELAPSED_TIME > 10 KILL
+table default trigger2 ELAPSED_TIME > '1hour' KILL
+PREHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ENABLED NULL default
+plan_2 default ACTIVE 10 default
+table default DISABLED 1 default
+PREHOOK: query: DROP TRIGGER plan_1.trigger_2
+PREHOOK: type: DROP TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000gb" DO KILL
+PREHOOK: type: ALTER TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ACTIVE NULL default
+plan_2 default ENABLED 10 default
+table default DISABLED 1 default
+PREHOOK: query: DROP TRIGGER plan_1.trigger_2
+PREHOOK: type: DROP TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000KB" DO KILL
+PREHOOK: type: ALTER TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+plan_2 default trigger_1 BYTES_READ > 0 MOVE TO null_pool
+table default table BYTES_WRITTEN > '100KB' MOVE TO default
+table default trigger BYTES_WRITTEN > '100MB' MOVE TO default
+table default trigger1 ELAPSED_TIME > 10 KILL
+table default trigger2 ELAPSED_TIME > '1hour' KILL
+PREHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN CREATE POOL plan_1.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
+POSTHOOK: type: CREATE POOL
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Pool
+      pool:
+
+PREHOOK: query: CREATE POOL plan_1.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: SemanticException alloc_fraction should be specified for a pool
+FAILED: SemanticException query_parallelism should be specified for a pool
+PREHOOK: query: CREATE POOL plan_2.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Pool already exists: )
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 5 NULL
+table default default 1.0 4 NULL
+FAILED: SemanticException Invalid scheduling policy invalid
+PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
+ ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH
+ ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL plan_2.default.c2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default])
+PREHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+POSTHOOK: type: ALTER POOL
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Pool
+      pool:
+      poolPath: default.c2
+
+PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair'
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair'
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 5 NULL
+plan_2 default default.c1 0.3 3 fair
+plan_2 default default.c2 0.7 1 fair
+table default default 1.0 4 NULL
+PREHOOK: query: ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 5 NULL
+plan_2 default default.c1 0.3 3 fair
+plan_2 default default.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+warn: Sum of all pools' query parallelism: 9 is less than resource plan query parallelism: 10
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: ALTER POOL plan_2.default SET path = def
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.default SET path = def
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+PREHOOK: query: EXPLAIN DROP POOL plan_2.default
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN DROP POOL plan_2.default
+POSTHOOK: type: DROP POOL
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop WM Pool
+      poolName: plan_2
+
+PREHOOK: query: DROP POOL plan_2.default
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot delete pool: default)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Pool path is invalid, the parent does not exist)
+PREHOOK: query: CREATE POOL `table`.`table` WITH
+ SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL `table`.`table` WITH
+ SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+table default table 0.0 1 fifo
+table default table.pool1 0.9 3 fair
+table default table.pool1.child1 0.3 1 fair
+table default table.pool1.child2 0.7 3 fair
+PREHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+table default table.pool.child1 0.3 1 fair
+table default table.pool.child2 0.7 3 fair
+PREHOOK: query: DROP POOL `table`.`table`
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Cannot drop a pool that has child pools)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+table default table.pool.child1 0.3 1 fair
+table default table.pool.child2 0.7 3 fair
+PREHOOK: query: DROP POOL `table`.default
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Cannot drop default pool of a resource plan)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default default 1.0 4 NULL
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+table default table.pool.child1 0.3 1 fair
+table default table.pool.child2 0.7 3 fair
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ACTIVE NULL default
+plan_2 default DISABLED 10 def
+table default DISABLED 1 default
+PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: DROP POOL `table`.default
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP POOL `table`.default
+POSTHOOK: type: DROP POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default def 1.0 5 NULL
+plan_2 default def.c1 0.3 3 fair
+plan_2 default def.c2 0.7 1 NULL
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+table default table.pool.child1 0.3 1 fair
+table default table.pool.child2 0.7 3 fair
+PREHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN `table` UNSET DEFAULT POOL
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ACTIVE NULL default
+plan_2 default DISABLED 10 def
+table default DISABLED 1 NULL
+PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Trigger to pool mappings
+      resourcePlanName: plan_2
+      Pool path: def.c1
+      Trigger name: trigger_1
+
+PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table`
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table`
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1`
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO POOL `table`.pool.child2
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO POOL `table`.pool.child2
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO UNMANAGED
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` ADD TO UNMANAGED
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+plan_2 default def.c1 trigger_1
+plan_2 default def.c2 trigger_1
+table default trigger1
+table default table table
+table default table.pool.child1 table
+table default table.pool.child1 trigger1
+table default table.pool.child2 trigger1
+table default table.pool.child2 trigger2
+PREHOOK: query: SHOW RESOURCE PLAN `table`
+PREHOOK: type: SHOW RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: SHOW RESOURCE PLAN `table`
+POSTHOOK: type: SHOW RESOURCEPLAN
+table[status=DISABLED,parallelism=1,defaultPool=null]
+ + table[allocFraction=0.0,schedulingPolicy=fifo,parallelism=1]
+ | trigger table: if (BYTES_WRITTEN > '100KB') { MOVE TO default }
+ + pool[allocFraction=0.9,schedulingPolicy=fair,parallelism=3]
+ + child2[allocFraction=0.7,schedulingPolicy=fair,parallelism=3]
+ | trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
+ | trigger trigger2: if (ELAPSED_TIME > '1hour') { KILL }
+ + child1[allocFraction=0.3,schedulingPolicy=fair,parallelism=1]
+ | trigger table: if (BYTES_WRITTEN > '100KB') { MOVE TO default }
+ | trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
+ +
+ | trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
+ +
+ | trigger trigger: if (BYTES_WRITTEN > '100MB') { MOVE TO default }
+PREHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM UNMANAGED
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM UNMANAGED
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+plan_2 default def.c1 trigger_1
+plan_2 default def.c2 trigger_1
+table default table table
+table default table.pool.child1 table
+table default table.pool.child1 trigger1
+table default table.pool.child2 trigger2
+PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find pool: default)
+PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+plan_2 default def.c1 trigger_1
+plan_2 default def.c2 trigger_1
+table default table table
+table default table.pool.child1 table
+table default table.pool.child1 trigger1
+table default table.pool.child2 trigger2
+PREHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop Trigger to pool mappings
+      resourcePlanName: plan_2
+      Pool path: def.c1
+      Trigger name: trigger_1
+
+PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+PREHOOK: query: DROP POOL `table`.`table`.pool.child1
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP POOL `table`.`table`.pool.child1
+POSTHOOK: type: DROP POOL
+PREHOOK: query: DROP POOL `table`.`table`.pool.child2
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP POOL `table`.`table`.pool.child2
+POSTHOOK: type: DROP POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+plan_2 default def.c2 trigger_1
+table default table table
+PREHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN CREATE USER MAPPING "user1" IN plan_2 TO def
+POSTHOOK: type: CREATE MAPPING
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Mapping
+      mapping:
+
+PREHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE APPLICATION MAPPING "app1" IN plan_2 TO def.c1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Mapping
+      mapping:
+
+PREHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE GROUP MAPPING 'group3' IN plan_2 UNMANAGED WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED
+PREHOOK: type: ALTER MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN ALTER USER MAPPING "user1" IN plan_2 UNMANAGED
+POSTHOOK: type: ALTER MAPPING
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Mapping
+      mapping:
+
+PREHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED
+PREHOOK: type: ALTER MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER USER MAPPING "user1" IN plan_2 UNMANAGED
+POSTHOOK: type: ALTER MAPPING
+PREHOOK: query: SHOW RESOURCE PLAN plan_2
+PREHOOK: type: SHOW RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: SHOW RESOURCE PLAN plan_2
+POSTHOOK: type: SHOW RESOURCEPLAN
+plan_2[status=DISABLED,parallelism=10,defaultPool=def]
+ + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5]
+ | mapped for users: user2
+ | mapped for default
+ + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1]
+ | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool }
+ | mapped for groups: group2
+ + c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3]
+ | mapped for groups: group1
+ | mapped for applications: app1
+ +
+ | mapped for users: user1
+ | mapped for groups: group3
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+plan_2 default APPLICATION app1 def.c1 0
+plan_2 default GROUP group1 def.c1 0
+plan_2 default GROUP group2 def.c2 1
+plan_2 default GROUP group3 1
+plan_2 default USER user1 0
+plan_2 default USER user2 def 1
+PREHOOK: query: DROP POOL plan_2.def.c1
+PREHOOK: type: DROP POOL
+PREHOOK: Output: dummyHostnameForTest
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidOperationException(message:Please remove all mappings for this pool.)
+PREHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2
+POSTHOOK: type: DROP MAPPING
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop mapping
+      mapping:
+
+PREHOOK: query: DROP USER MAPPING "user2" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP USER MAPPING "user2" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: EXPLAIN DROP GROUP MAPPING "group2" in plan_2
+POSTHOOK: type: DROP MAPPING
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop mapping
+      mapping:
+
+PREHOOK: query: DROP GROUP MAPPING "group2" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP GROUP MAPPING "group2" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: DROP GROUP MAPPING "group3" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP GROUP MAPPING "group3" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: DROP APPLICATION MAPPING "app1" in plan_2
+PREHOOK: type: DROP MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP APPLICATION MAPPING "app1" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+plan_2 default GROUP group1 def.c1 0
+plan_2 default USER user1 0
+PREHOOK: query: CREATE RESOURCE PLAN plan_4
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN plan_4
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SHOW RESOURCE PLAN plan_2
+PREHOOK: type: SHOW RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: SHOW RESOURCE PLAN plan_2
+POSTHOOK: type: SHOW RESOURCEPLAN
+plan_2[status=DISABLED,parallelism=10,defaultPool=def]
+ + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5]
+ | mapped for default
+ + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1]
+ | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool }
+ + c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3]
+ | mapped for groups: group1
+ +
+ | mapped for users: user1
+PREHOOK: query: DROP RESOURCE PLAN plan_2
+PREHOOK: type: DROP RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: DROP RESOURCE PLAN plan_2
+POSTHOOK: type: DROP RESOURCEPLAN
+PREHOOK: query: CREATE RESOURCE PLAN plan_2
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN plan_2
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ENABLED NULL default
+plan_2 default DISABLED NULL default
+plan_4 default ACTIVE NULL default
+table default DISABLED 1 NULL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 4 NULL
+plan_4 default default 1.0 4 NULL
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+table default table BYTES_WRITTEN > '100KB' MOVE TO default
+table default trigger BYTES_WRITTEN > '100MB' MOVE TO default
+table default trigger1 ELAPSED_TIME > 10 KILL
+table default trigger2 ELAPSED_TIME > '1hour' KILL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+table default table table
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+PREHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: CREATE POOL plan_4a.pool1 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=2, ALLOC_FRACTION=0.0
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL plan_4a.pool1 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=2, ALLOC_FRACTION=0.0
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1
+PREHOOK: type: CREATE MAPPING
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > '10GB' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > '10GB' DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > '11GB' DO KILL
+PREHOOK: type: CREATE TRIGGER
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > '11GB' DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2
+PREHOOK: type: ALTER POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: CREATE RESOURCE PLAN plan_4b LIKE plan_4a
+PREHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE RESOURCE PLAN plan_4b LIKE plan_4a
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: CREATE POOL plan_4b.pool2 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0
+PREHOOK: type: CREATE POOL
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: CREATE POOL plan_4b.pool2 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ENABLED NULL default
+plan_2 default DISABLED NULL default
+plan_4 default ACTIVE NULL default
+plan_4a default DISABLED NULL default
+plan_4b default DISABLED NULL default
+table default DISABLED 1 NULL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 4 NULL
+plan_4 default default 1.0 4 NULL
+plan_4a default default 1.0 4 NULL
+plan_4a default pool1 0.0 2 fair
+plan_4b default default 1.0 4 NULL
+plan_4b default pool1 0.0 2 fair
+plan_4b default pool2 0.0 3 fair
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool
+plan_4a default trigger_1 BYTES_READ > '10GB' KILL
+plan_4a default trigger_2 BYTES_READ > '11GB' KILL
+plan_4b default trigger_1 BYTES_READ > '10GB' KILL
+plan_4b default trigger_2 BYTES_READ > '11GB' KILL
+table default table BYTES_WRITTEN > '100KB' MOVE TO default
+table default trigger BYTES_WRITTEN > '100MB' MOVE TO default
+table default trigger1 ELAPSED_TIME > 10 KILL
+table default trigger2 ELAPSED_TIME > '1hour' KILL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+plan_4a default pool1 trigger_2
+plan_4b default pool1 trigger_2
+table default table table
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+plan_4a default USER user1 pool1 0
+plan_4b default USER user1 pool1 0
+PREHOOK: query: REPLACE RESOURCE PLAN plan_4a WITH plan_4b
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: REPLACE RESOURCE PLAN plan_4a WITH plan_4b
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_1 default ENABLED NULL default
+plan_2 default DISABLED NULL default
+plan_4 default ACTIVE NULL default
+plan_4a default DISABLED NULL default
+plan_4a_old_0 default DISABLED NULL default
+table default DISABLED 1 NULL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_1 default default 1.0 4 NULL
+plan_2 default default 1.0 4 NULL
+plan_4 default default 1.0 4 NULL
+plan_4a default default 1.0 4 NULL
+plan_4a default pool1 0.0 2 fair
+plan_4a default pool2 0.0 3 fair
+plan_4a_old_0 default default 1.0 4 NULL
+plan_4a_old_0 default pool1 0.0 2 fair
+table default table 0.0 1 fifo
+table default table.pool 0.9 3 fair
+PREHOOK: query: SHOW RESOURCE PLAN plan_4a_old_0
+PREHOOK: type: SHOW RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: SHOW RESOURCE PLAN plan_4a_old_0
+POSTHOOK: type: SHOW RESOURCEPLAN
+plan_4a_old_0[status=DISABLED,parallelism=null,defaultPool=default]
+ + default[allocFraction=1.0,schedulingPolicy=null,parallelism=4]
+ | mapped for default
+ + pool1[allocFraction=0.0,schedulingPolicy=fair,parallelism=2]
+ | trigger trigger_2: if (BYTES_READ > '11GB') { KILL }
+ | mapped for users: user1
+ +
+ | trigger trigger_1: if (BYTES_READ > '10GB') { KILL }
+PREHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a
+PREHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: Output: dummyHostnameForTest
+POSTHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked
pattern was here #### +plan_1 default ENABLED NULL default +plan_2 default DISABLED NULL default +plan_4 default ACTIVE NULL default +plan_4_old_0 default DISABLED NULL default +plan_4a_old_0 default DISABLED NULL default +table default DISABLED 1 NULL +PREHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4 +PREHOOK: type: CREATE RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: CREATE RESOURCE PLAN plan_4a LIKE plan_4 +POSTHOOK: type: CREATE RESOURCEPLAN +PREHOOK: query: CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 +PREHOOK: type: CREATE POOL +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: CREATE POOL plan_4a.pool3 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.0 +POSTHOOK: type: CREATE POOL +PREHOOK: query: ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE +PREHOOK: type: ALTER RESOURCEPLAN +PREHOOK: Output: dummyHostnameForTest +POSTHOOK: query: ALTER RESOURCE PLAN plan_4a ENABLE ACTIVATE WITH REPLACE +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 default ENABLED NULL default +plan_2 default DISABLED NULL default +plan_4 default ACTIVE NULL default +plan_4_old_0 default DISABLED NULL default +plan_4_old_1 default DISABLED NULL default +plan_4a_old_0 default DISABLED NULL default +table default DISABLED 1 NULL +PREHOOK: query: SELECT * FROM SYS.WM_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +plan_1 default default 1.0 4 NULL +plan_2 default default 1.0 4 NULL +plan_4 default default 1.0 4 NULL +plan_4 default pool1 0.0 2 fair +plan_4 default pool2 0.0 3 fair +plan_4 default pool3 0.0 3 fair +plan_4_old_0 default default 1.0 4 NULL +plan_4_old_1 default default 1.0 4 NULL +plan_4_old_1 default pool1 0.0 2 fair +plan_4_old_1 default pool2 0.0 3 fair +plan_4a_old_0 default default 1.0 4 NULL +plan_4a_old_0 default pool1 0.0 2 fair +table default table 0.0 1 fifo +table default table.pool 0.9 3 fair