diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 6dc052db45..eded92e1c1 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -987,7 +987,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
     return objectStore.getResourcePlan(name);
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index b947ab1cb4..cf8386bac5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -696,14 +696,12 @@ private int showResourcePlans(Hive db, ShowResourcePlanDesc showResourcePlanDesc
     // Note: Enhance showResourcePlan to display all the pools, triggers and mappings.
     DataOutputStream out = getOutputStream(showResourcePlanDesc.getResFile());
     try {
-      List<WMResourcePlan> resourcePlans;
       String rpName = showResourcePlanDesc.getResourcePlanName();
       if (rpName != null) {
-        resourcePlans = Collections.singletonList(db.getResourcePlan(rpName));
+        formatter.showFullResourcePlan(out, db.getResourcePlan(rpName));
       } else {
-        resourcePlans = db.getAllResourcePlans();
+        formatter.showResourcePlans(out, db.getAllResourcePlans());
       }
-      formatter.showResourcePlans(out, resourcePlans);
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 022ba04fbe..20d7593d88 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -4758,7 +4758,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName)
     }
   }
 
-  public WMResourcePlan getResourcePlan(String rpName) throws HiveException {
+  public WMFullResourcePlan getResourcePlan(String rpName) throws HiveException {
    try {
      return getMSC().getResourcePlan(rpName);
    } catch (NoSuchObjectException e) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index a44adf6b76..035c5fcc91 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.metadata.formatting;
 
+import java.io.Closeable;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -448,6 +450,101 @@ public void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
     }
   }
 
+  /**
+   * Formats a resource plan into a json object, the structure is as follows:
+   * {
+   *    name: "<rp_name>",
+   *    parallelism: "<parallelism>",
+   *    defaultQueue: "<defaultQueue>",
+   *    pools : [
+   *      {
+   *        name: "<pool_name>",
+   *        parallelism: "<parallelism>",
+   *        schedulingPolicy: "<schedulingPolicy>",
+   *        triggers: [
+   *          { name: "<triggerName>", trigger: "<triggerExpression>", action: "<actionExpression>" }
+   *          ...
+   *        ]
+   *      }
+   *      ...
+   *    ]
+   * }
+   */
+  private static class JsonRPFormatter implements MetaDataFormatUtils.RPFormatter, Closeable {
+    private final JsonGenerator generator;
+    private boolean inPool = false;
+
+    JsonRPFormatter(DataOutputStream out) throws IOException {
+      generator = new ObjectMapper().getJsonFactory().createJsonGenerator(out);
+    }
+
+    private void writeNameAndFields(String name, Object ... kvPairs) throws IOException {
+      if (kvPairs.length % 2 != 0) {
+        throw new IllegalArgumentException("Expected pairs");
+      }
+      generator.writeStringField("name", name);
+      for (int i = 0; i < kvPairs.length; i += 2) {
+        generator.writeObjectField(kvPairs[i].toString(), kvPairs[i + 1]);
+      }
+    }
+
+    @Override
+    public void formatRP(String rpName, Object ... kvPairs) throws IOException {
+      generator.writeStartObject();
+      writeNameAndFields(rpName, kvPairs);
+      generator.writeArrayFieldStart("pools");
+    }
+
+    @Override
+    public void formatPool(String poolName, int indentLevel, Object ... kvPairs)
+        throws IOException {
+      if (inPool) {
+        // End the triggers array.
+        generator.writeEndArray();
+        // End the pool object.
+        generator.writeEndObject();
+      } else {
+        inPool = true;
+      }
+      generator.writeStartObject();
+      writeNameAndFields(poolName, kvPairs);
+      generator.writeArrayFieldStart("triggers");
+      // triggers array and pool object left to be ended.
+    }
+
+    @Override
+    public void formatTrigger(String triggerName, String actionExpression, String triggerExpression,
+        int indentLevel) throws IOException {
+      generator.writeStartObject();
+      writeNameAndFields(triggerName, "action", actionExpression, "trigger", triggerExpression);
+      generator.writeEndObject();
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (inPool) {
+        // end the triggers within pool object.
+        generator.writeEndArray();
+        // End the last pool object.
+        generator.writeEndObject();
+      }
+      // End the pools array.
+      generator.writeEndArray();
+      // End the root rp object.
+      generator.writeEndObject();
+      generator.close();
+    }
+  }
+
+  public void showFullResourcePlan(DataOutputStream out, WMFullResourcePlan resourcePlan)
+      throws HiveException {
+    try (JsonRPFormatter formatter = new JsonRPFormatter(out)) {
+      MetaDataFormatUtils.formatFullRP(formatter, resourcePlan);
+    } catch (IOException e) {
+      throw new HiveException(e);
+    }
+  }
+
   @Override
   public void showErrors(DataOutputStream out, List<String> errors) throws HiveException {
     JsonGenerator generator = null;
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index aa1a99ddf7..489842ee82 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -36,9 +36,15 @@ import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.ql.index.HiveIndex;
 import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -54,11 +60,13 @@
 import com.google.common.collect.Lists;
 
+import java.io.IOException;
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -717,4 +725,124 @@ public static MetaDataFormatter getFormatter(HiveConf conf) {
     }
   }
 
+  /**
+   * Interface to implement actual conversion to text or json of a resource plan.
+   */
+  public interface RPFormatter {
+    void formatRP(String rpName, Object ... kvPairs) throws IOException;
+    void formatPool(String poolName, int indentLevel, Object ...kvPairs) throws IOException;
+    void formatTrigger(String triggerName, String actionExpression, String triggerExpression,
+        int indentLevel) throws IOException;
+  }
+
+  /**
+   * An n-ary tree for the pools, each node contains a pool and its children.
+   */
+  private static class PoolTreeNode {
+    private WMPool pool;
+    private final List<PoolTreeNode> children = new ArrayList<>();
+    private final List<WMTrigger> triggers = new ArrayList<>();
+
+    private PoolTreeNode() {}
+
+    private void writePoolTreeNode(RPFormatter rpFormatter, int indentLevel) throws IOException {
+      String path = pool.getPoolPath();
+      int idx = path.lastIndexOf('.');
+      if (idx != -1) {
+        path = path.substring(idx + 1);
+      }
+      Double allocFraction = pool.getAllocFraction();
+      String schedulingPolicy = pool.isSetSchedulingPolicy() ?
+          pool.getSchedulingPolicy() : null;
+      Integer parallelism = pool.getQueryParallelism();
+
+      rpFormatter.formatPool(path, indentLevel, "allocFraction", allocFraction,
+          "schedulingPolicy", schedulingPolicy, "parallelism", parallelism);
+      for (WMTrigger trigger : triggers) {
+        rpFormatter.formatTrigger(trigger.getTriggerName(), trigger.getActionExpression(),
+            trigger.getTriggerExpression(), indentLevel);
+      }
+      for (PoolTreeNode node : children) {
+        node.writePoolTreeNode(rpFormatter, indentLevel + 1);
+      }
+    }
+
+    private void sortChildren() {
+      children.sort((PoolTreeNode p1, PoolTreeNode p2) ->
+          Double.compare(p2.pool.getAllocFraction(), p1.pool.getAllocFraction()));
+      for (PoolTreeNode child : children) {
+        child.sortChildren();
+      }
+    }
+
+    static PoolTreeNode makePoolTree(WMFullResourcePlan fullRp) {
+      Map<String, PoolTreeNode> poolMap = new HashMap<>();
+      PoolTreeNode root = new PoolTreeNode();
+      for (WMPool pool : fullRp.getPools()) {
+        // Create or add node for current pool.
+        String path = pool.getPoolPath();
+        PoolTreeNode curr = poolMap.get(path);
+        if (curr == null) {
+          curr = new PoolTreeNode();
+          poolMap.put(path, curr);
+        }
+        curr.pool = pool;
+
+        // Add this node to the parent node.
+        int ind = path.lastIndexOf('.');
+        PoolTreeNode parent;
+        if (ind == -1) {
+          parent = root;
+        } else {
+          String parentPath = path.substring(0, ind);
+          parent = poolMap.get(parentPath);
+          if (parent == null) {
+            parent = new PoolTreeNode();
+            poolMap.put(parentPath, parent);
+          }
+        }
+        parent.children.add(curr);
+      }
+      Map<String, WMTrigger> triggerMap = new HashMap<>();
+      if (fullRp.getTriggers() != null) {
+        for (WMTrigger trigger : fullRp.getTriggers()) {
+          triggerMap.put(trigger.getTriggerName(), trigger);
+        }
+      }
+      if (fullRp.getPoolTriggers() != null) {
+        for (WMPoolTrigger pool2Trigger : fullRp.getPoolTriggers()) {
+          PoolTreeNode node = poolMap.get(pool2Trigger.getPool());
+          WMTrigger trigger = triggerMap.get(pool2Trigger.getTrigger());
+          if (node == null || trigger == null) {
+            throw new IllegalStateException("Invalid trigger to pool: " + pool2Trigger.getPool() +
+                ", " + pool2Trigger.getTrigger());
+          }
+          node.triggers.add(trigger);
+        }
+      }
+      return root;
+    }
+  }
+
+  private static void writeRPLine(RPFormatter rpFormatter, WMResourcePlan plan)
+      throws IOException {
+    Integer parallelism = plan.isSetQueryParallelism() ? plan.getQueryParallelism() : null;
+    String defaultPool = plan.isSetDefaultPoolPath() ?
+        plan.getDefaultPoolPath() : null;
+    rpFormatter.formatRP(plan.getName(), "status", plan.getStatus().toString(),
+        "parallelism", parallelism, "defaultPool", defaultPool);
+  }
+
+  public static void formatFullRP(RPFormatter rpFormatter, WMFullResourcePlan fullRp)
+      throws HiveException {
+    try {
+      WMResourcePlan plan = fullRp.getPlan();
+      writeRPLine(rpFormatter, plan);
+      PoolTreeNode root = PoolTreeNode.makePoolTree(fullRp);
+      root.sortChildren();
+      for (PoolTreeNode pool : root.children) {
+        pool.writePoolTreeNode(rpFormatter, 1);
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
+    }
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 6ba474c4bf..cfc381b23c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -121,10 +122,12 @@ public void showDatabaseDescription (DataOutputStream out, String database, Stri
       String location, String ownerName, String ownerType, Map<String, String> params)
           throws HiveException;
 
-  public void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
+  void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
       throws HiveException;
 
-  public void showErrors(DataOutputStream out, List<String> errors)
+  void showFullResourcePlan(DataOutputStream out, WMFullResourcePlan resourcePlan)
       throws HiveException;
+
+  void showErrors(DataOutputStream out, List<String> errors)
+      throws HiveException;
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index f3d878d3f7..0f1e893de6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -21,6 +21,7 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
@@ -544,24 +546,26 @@ public void showDatabaseDescription(DataOutputStream outStream, String database,
     }
   }
 
+  private static final Charset UTF_8 = Charset.forName("UTF-8");
+
   public void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
       throws HiveException {
     try {
       for (WMResourcePlan plan : resourcePlans) {
-        out.write(plan.getName().getBytes("UTF-8"));
+        out.write(plan.getName().getBytes(UTF_8));
         out.write(separator);
-        out.write(plan.getStatus().name().getBytes("UTF-8"));
+        out.write(plan.getStatus().name().getBytes(UTF_8));
         out.write(separator);
         if (plan.isSetQueryParallelism()) {
-          out.writeBytes(Integer.toString(plan.getQueryParallelism()));
+          out.write(Integer.toString(plan.getQueryParallelism()).getBytes(UTF_8));
         } else {
-          out.writeBytes("null");
+          out.write("null".getBytes(UTF_8));
         }
         out.write(separator);
         if (plan.isSetDefaultPoolPath()) {
-          out.write(plan.getDefaultPoolPath().getBytes("UTF-8"));
+          out.write(plan.getDefaultPoolPath().getBytes(UTF_8));
         } else {
-          out.writeBytes("null");
+          out.write("null".getBytes(UTF_8));
         }
         out.write(terminator);
       }
@@ -570,6 +574,83 @@ public void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
     }
   }
 
+  /**
+   * Class to print text records for resource plans in the following format:
+   *
+   * <rp_name>[status=<status>,parallelism=<parallelism>,defaultPool=<defaultPool>]
+   *     <pool_name>[allocFraction=<fraction>,schedulingPolicy=<policy>,parallelism=<parallelism>]
+   *      >  <trigger_name>: if(<triggerExpression>){<actionExpression>}
+   */
+  private static class TextRPFormatter implements MetaDataFormatUtils.RPFormatter {
+    private final DataOutputStream out;
+
+    TextRPFormatter(DataOutputStream out) {
+      this.out = out;
+    }
+
+    @Override
+    public void formatRP(String rpName, Object ... kvPairs) throws IOException {
+      out.write(rpName.getBytes(UTF_8));
+      writeFields(kvPairs);
+      out.write(terminator);
+    }
+
+    private static final byte[] INDENT = "    ".getBytes(UTF_8);
+
+    @Override
+    public void formatPool(String poolName, int indentLevel, Object ... kvPairs)
+        throws IOException {
+      for (int i = 0; i < indentLevel; ++i) {
+        out.write(INDENT);
+      }
+      out.write(poolName.getBytes(UTF_8));
+      writeFields(kvPairs);
+      out.write(terminator);
+    }
+
+    private void writeFields(Object ... kvPairs)
+        throws IOException {
+      if (kvPairs.length % 2 != 0) {
+        throw new IllegalArgumentException("Expected pairs, got: " + kvPairs.length);
+      }
+      if (kvPairs.length < 2) {
+        return;
+      }
+      out.write('[');
+      out.write(kvPairs[0].toString().getBytes(UTF_8));
+      out.write('=');
+      out.write((kvPairs[1] == null ? "null" : kvPairs[1].toString()).getBytes(UTF_8));
+      for (int i = 2; i < kvPairs.length; i += 2) {
+        out.write(',');
+        out.write(kvPairs[i].toString().getBytes(UTF_8));
+        out.write('=');
+        out.write((kvPairs[i + 1] == null ? "null" : kvPairs[i + 1].toString()).getBytes(UTF_8));
+      }
+      out.write(']');
+    }
+
+    @Override
+    public void formatTrigger(String triggerName, String actionExpression, String triggerExpression,
+        int indentLevel) throws IOException {
+      for (int i = 0; i < indentLevel; ++i) {
+        out.write(INDENT);
+      }
+      out.write(" > ".getBytes(UTF_8));
+      out.write(triggerName.getBytes(UTF_8));
+      out.write(": if(".getBytes(UTF_8));
+      out.write(triggerExpression.getBytes(UTF_8));
+      out.write("){".getBytes(UTF_8));
+      out.write(actionExpression.getBytes(UTF_8));
+      out.write('}');
+      out.write(terminator);
+    }
+  }
+
+  public void showFullResourcePlan(DataOutputStream out, WMFullResourcePlan fullResourcePlan)
+      throws HiveException {
+    MetaDataFormatUtils.formatFullRP(new TextRPFormatter(out), fullResourcePlan);
+  }
+
   public void showErrors(DataOutputStream out, List<String> errors) throws HiveException {
     try {
       for (String error : errors) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a092d3d878..c413bc5126 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hive.ql.parse;
 
-import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager;
-
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.antlr.runtime.tree.CommonTree;
@@ -937,7 +935,7 @@ private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException {
     ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile());
     rootTasks.add(TaskFactory.get(
         new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc), conf));
-    setFetchTask(createFetchTask(showResourcePlanDesc.getSchema()));
+    setFetchTask(createFetchTask(showResourcePlanDesc.getSchema(rpName)));
   }
 
   private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java
index 0b4cfb5adc..36aeabbb9f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java
@@ -23,12 +23,14 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-@Explain(displayName = "Show Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+@Explain(displayName = "Show Resource plans",
+    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class ShowResourcePlanDesc extends DDLDesc implements Serializable {
   private static final long serialVersionUID = 6076076933035978545L;
 
-  private static final String table = "show_resourceplan";
-  private static final String schema = "rp_name,status,query_parallelism#string,string,int";
+  private static final String TABLE = "show_resourceplan";
+  private static final String ALL_SCHEMA = "rp_name,status,query_parallelism#string,string,int";
+  private static final String SINGLE_SCHEMA = "line#string";
 
   String resFile;
   String resourcePlanName;
@@ -50,16 +52,17 @@ public void setResFile(String resFile) {
     this.resFile = resFile;
   }
 
-  @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  @Explain(displayName="resourcePlanName",
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getResourcePlanName() {
     return resourcePlanName;
   }
 
   public String getTable() {
-    return table;
+    return TABLE;
   }
 
-  public String getSchema() {
-    return schema;
+  public String getSchema(String rpName) {
+    return (rpName == null) ? ALL_SCHEMA : SINGLE_SCHEMA;
   }
 }
diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
new file mode 100644
index 0000000000..f2b689098d
--- /dev/null
+++ ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata.formatting;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for json resource plan formatter.
+ */
+public class TestJsonRPFormatter {
+  private final JsonMetaDataFormatter formatter = new JsonMetaDataFormatter();
+
+  private ByteArrayOutputStream bos;
+  private DataOutputStream out;
+
+  @Before
+  public void setup() {
+    bos = new ByteArrayOutputStream();
+    out = new DataOutputStream(bos);
+  }
+
+  @After
+  public void teardown() throws Exception {
+    out.close();
+    bos.close();
+  }
+
+  private WMFullResourcePlan createRP(String name, Integer parallelism, String defaultPoolPath) {
+    WMResourcePlan rp = new WMResourcePlan(name);
+    rp.setStatus(WMResourcePlanStatus.ACTIVE);
+    if (parallelism != null) {
+      rp.setQueryParallelism(parallelism);
+    }
+    if (defaultPoolPath != null) {
+      rp.setDefaultPoolPath(defaultPoolPath);
+    }
+    WMFullResourcePlan fullRp = new WMFullResourcePlan(rp, new ArrayList<>());
+    return fullRp;
+  }
+
+  private void addPool(WMFullResourcePlan fullRp, String poolName, double allocFraction,
+      int parallelism, String policy) {
+    WMPool pool = new WMPool(fullRp.getPlan().getName(), poolName);
+    pool.setAllocFraction(allocFraction);
+    pool.setQueryParallelism(parallelism);
+    if (policy != null) {
+      pool.setSchedulingPolicy(policy);
+    }
+    fullRp.addToPools(pool);
+  }
+
+  private void addTrigger(WMFullResourcePlan fullRp, String triggerName, String action,
+      String expr, String poolName) {
+    WMTrigger trigger = new WMTrigger(fullRp.getPlan().getName(), triggerName);
+    trigger.setActionExpression(action);
+    trigger.setTriggerExpression(expr);
+    fullRp.addToTriggers(trigger);
+
+    WMPoolTrigger pool2Trigger = new WMPoolTrigger(poolName, triggerName);
+    fullRp.addToPoolTriggers(pool2Trigger);
+  }
+
+  @Test
+  public void testJsonEmptyRPFormatter() throws Exception {
+    WMFullResourcePlan fullRp = createRP("test_rp_1", null, null);
+    formatter.showFullResourcePlan(out, fullRp);
+    out.flush();
+
+    ObjectMapper objectMapper = new ObjectMapper();
+    JsonNode jsonTree = objectMapper.readTree(bos.toByteArray());
+
+    assertNotNull(jsonTree);
+    assertTrue(jsonTree.isObject());
+    assertEquals("test_rp_1", jsonTree.get("name").asText());
+    assertTrue(jsonTree.get("parallelism").isNull());
+    assertTrue(jsonTree.get("defaultPool").isNull());
+    assertTrue(jsonTree.get("pools").isArray());
+    assertEquals(0, jsonTree.get("pools").size());
+  }
+
+  @Test
+  public void testJsonRPFormatter() throws Exception {
+    WMFullResourcePlan fullRp = createRP("test_rp_2", 10, "def");
+    addPool(fullRp, "pool1", 0.3, 3, "fair");
+    addTrigger(fullRp, "trigger1", "KILL", "BYTES > 2", "pool1");
+    addPool(fullRp, "pool2", 0.7, 7, "fcfs");
+    formatter.showFullResourcePlan(out, fullRp);
+    out.flush();
+
+    ObjectMapper objectMapper = new ObjectMapper();
+    JsonNode jsonTree = objectMapper.readTree(bos.toByteArray());
+
+    assertNotNull(jsonTree);
+    assertTrue(jsonTree.isObject());
+    assertEquals("test_rp_2", jsonTree.get("name").asText());
+    assertEquals(10, jsonTree.get("parallelism").asInt());
+    assertEquals("def", jsonTree.get("defaultPool").asText());
+    assertTrue(jsonTree.get("pools").isArray());
+    assertEquals(2, jsonTree.get("pools").size());
+
+    JsonNode pool2 = jsonTree.get("pools").get(0);
+    assertEquals("pool2", pool2.get("name").asText());
+    assertEquals("fcfs", pool2.get("schedulingPolicy").asText());
+    assertEquals(7, pool2.get("parallelism").asInt());
+    assertEquals(0.7, pool2.get("allocFraction").asDouble(), 0.00001);
+    assertTrue(pool2.get("triggers").isArray());
+    assertEquals(0, pool2.get("triggers").size());
+
+    JsonNode pool1 = jsonTree.get("pools").get(1);
+    assertEquals("pool1", pool1.get("name").asText());
pool1.get("name").asText()); + assertEquals("fair", pool1.get("schedulingPolicy").asText()); + assertEquals(3, pool1.get("parallelism").asInt()); + assertEquals(0.3, pool1.get("allocFraction").asDouble(), 0.00001); + assertTrue(pool1.get("triggers").isArray()); + assertEquals(1, pool1.get("triggers").size()); + + JsonNode trigger1 = pool1.get("triggers").get(0); + assertEquals("trigger1", trigger1.get("name").asText()); + assertEquals("KILL", trigger1.get("action").asText()); + assertEquals("BYTES > 2", trigger1.get("trigger").asText()); + } +} diff --git ql/src/test/queries/clientpositive/resourceplan.q ql/src/test/queries/clientpositive/resourceplan.q index a4a9b7c811..ce684ed51b 100644 --- ql/src/test/queries/clientpositive/resourceplan.q +++ ql/src/test/queries/clientpositive/resourceplan.q @@ -330,6 +330,8 @@ CREATE RESOURCE PLAN plan_4; ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE; +SHOW RESOURCE PLAN plan_2; + -- This should remove all pools, triggers & mappings. DROP RESOURCE PLAN plan_2; diff --git ql/src/test/results/clientpositive/llap/resourceplan.q.out ql/src/test/results/clientpositive/llap/resourceplan.q.out index 94cf877d0c..a9c93ab9c5 100644 --- ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -3158,7 +3158,8 @@ PREHOOK: query: SHOW RESOURCE PLAN plan_1 PREHOOK: type: SHOW RESOURCEPLAN POSTHOOK: query: SHOW RESOURCE PLAN plan_1 POSTHOOK: type: SHOW RESOURCEPLAN -plan_1 DISABLED +plan_1[status=DISABLED,parallelism=null,defaultPool=default] + default[allocFraction=1.0,schedulingPolicy=,parallelism=4] PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -3182,7 +3183,8 @@ PREHOOK: query: SHOW RESOURCE PLAN plan_2 PREHOOK: type: SHOW RESOURCEPLAN POSTHOOK: query: SHOW RESOURCE PLAN plan_2 POSTHOOK: type: SHOW RESOURCEPLAN -plan_2 DISABLED 4 +plan_2[status=DISABLED,parallelism=4,defaultPool=default] + default[allocFraction=1.0,schedulingPolicy=,parallelism=4] PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -4079,6 +4081,15 @@ PREHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN POSTHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: SHOW RESOURCE PLAN plan_2 +PREHOOK: type: SHOW RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLAN plan_2 +POSTHOOK: type: SHOW RESOURCEPLAN +plan_2[status=DISABLED,parallelism=4,defaultPool=def] + def[allocFraction=1.0,schedulingPolicy=,parallelism=4] + c2[allocFraction=0.7,schedulingPolicy=fair,parallelism=1] + > trigger_1: if(BYTES_READ = 0){MOVE TO null_pool} + c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3] PREHOOK: query: DROP RESOURCE PLAN plan_2 PREHOOK: type: DROP RESOURCEPLAN POSTHOOK: query: DROP RESOURCE PLAN plan_2 diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index ac7597a5bf..87a5c293f0 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -22257,7 +22257,7 @@ WMGetResourcePlanResponse::~WMGetResourcePlanResponse() throw() { } -void WMGetResourcePlanResponse::__set_resourcePlan(const WMResourcePlan& val) { +void WMGetResourcePlanResponse::__set_resourcePlan(const WMFullResourcePlan& val) { this->resourcePlan = val; __isset.resourcePlan = 
true; } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 442255c6a7..74ce8d3014 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -9184,11 +9184,11 @@ class WMGetResourcePlanResponse { } virtual ~WMGetResourcePlanResponse() throw(); - WMResourcePlan resourcePlan; + WMFullResourcePlan resourcePlan; _WMGetResourcePlanResponse__isset __isset; - void __set_resourcePlan(const WMResourcePlan& val); + void __set_resourcePlan(const WMFullResourcePlan& val); bool operator == (const WMGetResourcePlanResponse & rhs) const { diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetResourcePlanResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetResourcePlanResponse.java index 638728e92c..7341dee624 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetResourcePlanResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetResourcePlanResponse.java @@ -46,7 +46,7 @@ schemes.put(TupleScheme.class, new WMGetResourcePlanResponseTupleSchemeFactory()); } - private WMResourcePlan resourcePlan; // optional + private WMFullResourcePlan resourcePlan; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -112,7 +112,7 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RESOURCE_PLAN, new org.apache.thrift.meta_data.FieldMetaData("resourcePlan", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMResourcePlan.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMFullResourcePlan.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMGetResourcePlanResponse.class, metaDataMap); } @@ -125,7 +125,7 @@ public WMGetResourcePlanResponse() { */ public WMGetResourcePlanResponse(WMGetResourcePlanResponse other) { if (other.isSetResourcePlan()) { - this.resourcePlan = new WMResourcePlan(other.resourcePlan); + this.resourcePlan = new WMFullResourcePlan(other.resourcePlan); } } @@ -138,11 +138,11 @@ public void clear() { this.resourcePlan = null; } - public WMResourcePlan getResourcePlan() { + public WMFullResourcePlan getResourcePlan() { return this.resourcePlan; } - public void setResourcePlan(WMResourcePlan resourcePlan) { + public void setResourcePlan(WMFullResourcePlan resourcePlan) { this.resourcePlan = resourcePlan; } @@ -167,7 +167,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetResourcePlan(); } else { - setResourcePlan((WMResourcePlan)value); + setResourcePlan((WMFullResourcePlan)value); } break; @@ -328,7 +328,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetResourcePlanRe switch (schemeField.id) { case 1: // RESOURCE_PLAN if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan = new 
WMFullResourcePlan(); struct.resourcePlan.read(iprot); struct.setResourcePlanIsSet(true); } else { @@ -387,7 +387,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetResourcePlanRes TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan = new WMFullResourcePlan(); struct.resourcePlan.read(iprot); struct.setResourcePlanIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index ab1887e53e..74f39ff761 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -21841,7 +21841,7 @@ class WMGetResourcePlanResponse { static $_TSPEC; /** - * @var \metastore\WMResourcePlan + * @var \metastore\WMFullResourcePlan */ public $resourcePlan = null; @@ -21851,7 +21851,7 @@ class WMGetResourcePlanResponse { 1 => array( 'var' => 'resourcePlan', 'type' => TType::STRUCT, - 'class' => '\metastore\WMResourcePlan', + 'class' => '\metastore\WMFullResourcePlan', ), ); } @@ -21883,7 +21883,7 @@ class WMGetResourcePlanResponse { { case 1: if ($ftype == TType::STRUCT) { - $this->resourcePlan = new \metastore\WMResourcePlan(); + $this->resourcePlan = new \metastore\WMFullResourcePlan(); $xfer += $this->resourcePlan->read($input); } else { $xfer += $input->skip($ftype); diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 1fbe1a1060..8787326edf 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -15457,7 +15457,7 @@ class WMGetResourcePlanResponse: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'resourcePlan', (WMResourcePlan, WMResourcePlan.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'resourcePlan', (WMFullResourcePlan, WMFullResourcePlan.thrift_spec), None, ), # 1 ) def __init__(self, resourcePlan=None,): @@ -15474,7 +15474,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.resourcePlan = WMResourcePlan() + self.resourcePlan = WMFullResourcePlan() self.resourcePlan.read(iprot) else: iprot.skip(ftype) diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index a090f792d2..46be4fb395 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3496,7 +3496,7 @@ class WMGetResourcePlanResponse RESOURCEPLAN = 1 FIELDS = { - RESOURCEPLAN => {:type => ::Thrift::Types::STRUCT, :name => 'resourcePlan', :class => ::WMResourcePlan, :optional => true} + RESOURCEPLAN => {:type => ::Thrift::Types::STRUCT, :name => 'resourcePlan', :class => ::WMFullResourcePlan, :optional => true} } def struct_fields; FIELDS; end diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index f1b58c526d..a1eeb29ec0 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -7307,7 +7307,7 @@ public WMCreateResourcePlanResponse 
     public WMGetResourcePlanResponse get_resource_plan(WMGetResourcePlanRequest request)
         throws NoSuchObjectException, MetaException, TException {
       try {
-        WMResourcePlan rp = getMS().getResourcePlan(request.getResourcePlanName());
+        WMFullResourcePlan rp = getMS().getResourcePlan(request.getResourcePlanName());
         WMGetResourcePlanResponse resp = new WMGetResourcePlanResponse();
         resp.setResourcePlan(rp);
         return resp;
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 2b6b0b64c8..16d08b1f06 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2613,7 +2613,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName)
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String resourcePlanName)
+  public WMFullResourcePlan getResourcePlan(String resourcePlanName)
       throws NoSuchObjectException, MetaException, TException {
     WMGetResourcePlanRequest request = new WMGetResourcePlanRequest();
     request.setResourcePlanName(resourcePlanName);
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 6905bd4f8a..4d68217014 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1774,7 +1774,7 @@ void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) thro
   void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName)
       throws InvalidObjectException, MetaException, TException;
 
-  WMResourcePlan getResourcePlan(String resourcePlanName)
+  WMFullResourcePlan getResourcePlan(String resourcePlanName)
       throws NoSuchObjectException, MetaException, TException;
 
   List<WMResourcePlan> getAllResourcePlans()
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index b708fae7ec..5bfa62485b 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -20,7 +20,6 @@
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
 
-import java.util.Random;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
@@ -9702,12 +9701,18 @@ private WMMapping fromMMapping(MWMMapping mMapping, String rpName) {
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+    boolean commited = false;
     try {
-      return fromMResourcePlan(getMWMResourcePlan(name, false));
+      openTransaction();
+      WMFullResourcePlan fullRp = fullFromMResourcePlan(getMWMResourcePlan(name, false));
+      commited = commitTransaction();
+      return fullRp;
     } catch (InvalidOperationException e) {
       // Should not happen, edit check is false.
       throw new RuntimeException(e);
+    } finally {
+      rollbackAndCleanup(commited, (Query)null);
     }
   }
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 8af96db0bc..fa77f63567 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -756,7 +756,7 @@ void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[]
   void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
       throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException;
 
-  WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
+  WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
 
   List<WMResourcePlan> getAllResourcePlans() throws MetaException;
 
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 9856f8a195..e1be6b9448 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -2386,7 +2386,8 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException {
+  public WMFullResourcePlan getResourcePlan(String name)
+      throws NoSuchObjectException, MetaException {
     return rawStore.getResourcePlan(name);
   }
 
diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift
index c0f8b1182f..6eb8fd67db 100644
--- standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1110,7 +1110,7 @@ struct WMGetResourcePlanRequest {
 }
 
 struct WMGetResourcePlanResponse {
-  1: optional WMResourcePlan resourcePlan;
+  1: optional WMFullResourcePlan resourcePlan;
 }
 
 struct WMGetAllResourcePlanRequest {
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index e59e3496bf..2aa5551a42 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -949,7 +949,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
     return objectStore.getResourcePlan(name);
   }
 
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 8be099cbcb..4ec5864699 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -960,7 +960,7 @@ public void createResourcePlan(
   }
 
   @Override
-  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
     return null;
   }
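
For reference (illustrative output only, not part of the patch): given the JsonRPFormatter code and the values constructed in testJsonRPFormatter above, the JSON written for SHOW RESOURCE PLAN would look roughly like the following. It is pretty-printed here for readability; the generator itself writes it without indentation, and the field order follows the formatRP/formatPool/formatTrigger calls. Pools appear in descending allocFraction order because of PoolTreeNode.sortChildren().

{
  "name": "test_rp_2",
  "status": "ACTIVE",
  "parallelism": 10,
  "defaultPool": "def",
  "pools": [
    {
      "name": "pool2",
      "allocFraction": 0.7,
      "schedulingPolicy": "fcfs",
      "parallelism": 7,
      "triggers": []
    },
    {
      "name": "pool1",
      "allocFraction": 0.3,
      "schedulingPolicy": "fair",
      "parallelism": 3,
      "triggers": [
        { "name": "trigger1", "action": "KILL", "trigger": "BYTES > 2" }
      ]
    }
  ]
}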