diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 089b88cdb0..9293e47563 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -4438,6 +4438,8 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal LLAP_COLLECT_LOCK_METRICS("hive.llap.lockmetrics.collect", false, "Whether lock metrics (wait times, counts) are collected for LLAP " + "related locks"), + LLAP_LATENCY_METRIC_WINDOW_SIZE("hive.llap.metrics.latency.window.size", 2048, + "The number of samples to keep in the sampling reservoir"), HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", "500ms", new TimeValidator(TimeUnit.MILLISECONDS), diff --git hive-site.xml hive-site.xml new file mode 100644 index 0000000000..ed21692144 --- /dev/null +++ hive-site.xml @@ -0,0 +1,358 @@ + + + + + + hive.server2.thrift.max.worker.threads + 1 + + + hive.server2.thrift.min.worker.threads + 1 + + + datanucleus.deletionPolicy + DataNucleus + + + + + hive.jar.path + /Users/petervary/dev/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar + The location of hive_cli.jar that is used when submitting jobs in a separate jvm. 
+ + + hive.hadoop.classpath + /Users/petervary/dev/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar + + + hive.metastore.local + false + + + metastore.thrift.uris + thrift://localhost:9084 + + + metastore.thrift.port + 9084 + + + hive.server2.thrift.port + 10003 + + + hive.server2.webui.port + 10004 + + + metastore.warehouse.dir + pfile:///Users/petervary/data/apache/hive/warehouse + + + fs.pfile.impl + org.apache.hadoop.fs.ProxyLocalFileSystem + A proxy for local file system used for cross file system testing + + + metastore.expression.proxy + org.apache.hadoop.hive.metastore.DefaultPartitionExpressionProxy + + + metastore.task.threads.always + org.apache.hadoop.hive.metastore.events.EventCleanerTask + + + + + + + + + hive.metastore.schema.verification + false + + + +datanucleus.autoCreateTables +true + + + +datanucleus.schema.autoCreateAll +true + + + + + metastore.metastore.event.db.notification.api.auth + false + + + + + + + + hive.exec.scratchdir + /tmp/hive-${user.name} + + + + + + + + + + + + javax.jdo.option.ConnectionURL + + jdbc:mysql://localhost/upstream_hive + JDBC connect string for a JDBC metastore + + + + hive.metastore.try.direct.sql + true + + + + javax.jdo.option.ConnectionDriverName + com.mysql.jdbc.Driver + + + + javax.jdo.option.ConnectionUserName + upstream_hive + + + + javax.jdo.option.ConnectionPassword + hive + + + + hive.server2.enable.doAs + false + + + + + hive.server2.enable.impersonation + false + + + + + dfs.namenode.acls.enabled + false + + + + + + + + + + + + + diff --git llap-common/src/test/org/apache/hadoop/hive/llap/metrics/MockMetricsCollector.java llap-common/src/test/org/apache/hadoop/hive/llap/metrics/MockMetricsCollector.java new file mode 100644 index 0000000000..c24d3ef0aa --- /dev/null +++ llap-common/src/test/org/apache/hadoop/hive/llap/metrics/MockMetricsCollector.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.llap.metrics; + +import com.google.common.collect.Lists; +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsTag; +import org.apache.hadoop.metrics2.lib.Interns; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Mock metrics collector for this test only. + * This MetricsCollector implementation is used to get the actual + * MetricsSource data, collected by the + * ReadWriteLockMetrics. + */ +public class MockMetricsCollector implements MetricsCollector { + private ArrayList records = new ArrayList<>(); + + /** + * Single metrics record mock implementation. + */ + public static class MockRecord { + private final String recordLabel; ///< record tag/label + private final Map metrics; ///< metrics within record + private String context; ///< collector context ID + + /** + * @param label metrics record label. + */ + public MockRecord(String label) { + recordLabel = label; + metrics = new HashMap<>(); + } + + /** + * @return The record's tag/label. 
+ */ + public String getLabel() { + return recordLabel; + } + + /** + * @return The context of the collector. + */ + public String getContext() { + return context; + } + + /** + * @return Map of identifier/metric value pairs. + */ + public Map getMetrics() { + return metrics; + } + } + + /** + * Record builder mock implementation. + */ + private class MockMetricsRecordBuilder extends MetricsRecordBuilder { + private MockRecord target = null; ///< the record that is populated + private final List tags; + + /** + * Used by outer class to provide a new MetricsRecordBuilder + * for a single metrics record. + * + * @param t The record to build. + */ + MockMetricsRecordBuilder(MockRecord t) { + target = t; + tags = Lists.newArrayList(); + } + + @Override + public MetricsRecordBuilder add(MetricsTag arg0) { + throw new AssertionError("Not implemented for test"); + } + + @Override + public MetricsRecordBuilder add(AbstractMetric arg0) { + throw new AssertionError("Not implemented for test"); + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo arg0, int arg1) { + target.getMetrics().put(arg0, arg1); + return this; + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo arg0, long arg1) { + target.getMetrics().put(arg0, arg1); + return this; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo arg0, int arg1) { + target.getMetrics().put(arg0, arg1); + return this; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo arg0, long arg1) { + target.getMetrics().put(arg0, arg1); + return this; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo arg0, float arg1) { + throw new AssertionError("Not implemented for test"); + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo arg0, double arg1) { + target.getMetrics().put(arg0, arg1); + return this; + } + + @Override + public MetricsCollector parent() { + return MockMetricsCollector.this; + } + + @Override + public MetricsRecordBuilder 
setContext(String arg0) { + target.context = arg0; + return this; + } + + @Override + public MetricsRecordBuilder tag(MetricsInfo info, String value) { + tags.add(Interns.tag(info, value)); + return this; + } + } + + @Override + public MetricsRecordBuilder addRecord(String arg0) { + MockRecord tr = new MockRecord(arg0); + records.add(tr); + return new MockMetricsRecordBuilder(tr); + } + + @Override + public MetricsRecordBuilder addRecord(MetricsInfo arg0) { + MockRecord tr = new MockRecord(arg0.name()); + records.add(tr); + return new MockMetricsRecordBuilder(tr); + } + + /** + * @return A list of all built metrics records. + */ + public List getRecords() { + return records; + } +} diff --git llap-common/src/test/org/apache/hadoop/hive/llap/metrics/TestReadWriteLockMetrics.java llap-common/src/test/org/apache/hadoop/hive/llap/metrics/TestReadWriteLockMetrics.java index f48e1f5417..ed33f32697 100644 --- llap-common/src/test/org/apache/hadoop/hive/llap/metrics/TestReadWriteLockMetrics.java +++ llap-common/src/test/org/apache/hadoop/hive/llap/metrics/TestReadWriteLockMetrics.java @@ -31,22 +31,14 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsTag; import org.junit.Ignore; import org.junit.Test; @@ -137,150 +129,6 @@ public void run() { } } - /** - * Mock metrics collector for this test only. 
- * This MetricsCollector implementation is used to get the actual - * MetricsSource data, collected by the - * ReadWriteLockMetrics. - */ - private static class MockMetricsCollector implements MetricsCollector { - private ArrayList records = new ArrayList<>(); - - /** - * Single metrics record mock implementation. - */ - public static class MockRecord { - private final String recordLabel; ///< record tag/label - private final HashMap metrics; ///< metrics within record - private String context; ///< collector context ID - - /** - * @param label metrics record label. - */ - public MockRecord(String label) { - recordLabel = label; - metrics = new HashMap<>(); - } - - /** - * @return The record's tag/label. - */ - public String getLabel() { - return recordLabel; - } - - /** - * @return The context of the collector. - */ - public String getContext() { - return context; - } - - /** - * @return Map of identifier/metric value pairs. - */ - public Map getMetrics() { - return metrics; - } - } - - /** - * Record builder mock implementation. - */ - private class MockMetricsRecordBuilder extends MetricsRecordBuilder { - private MockRecord target = null; ///< the record that is populated - - /** - * Used by outer class to provide a new MetricsRecordBuilder - * for a single metrics record. - * - * @param t The record to build. 
- */ - public MockMetricsRecordBuilder(MockRecord t) { - target = t; - } - - @Override - public MetricsRecordBuilder add(MetricsTag arg0) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsRecordBuilder add(AbstractMetric arg0) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo arg0, int arg1) { - target.getMetrics().put(arg0, arg1); - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo arg0, long arg1) { - target.getMetrics().put(arg0, arg1); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo arg0, int arg1) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo arg0, long arg1) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo arg0, float arg1) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo arg0, double arg1) { - throw new AssertionError("Not implemented for test"); - } - - @Override - public MetricsCollector parent() { - return MockMetricsCollector.this; - } - - @Override - public MetricsRecordBuilder setContext(String arg0) { - target.context = arg0; - return this; - } - - @Override - public MetricsRecordBuilder tag(MetricsInfo arg0, String arg1) { - throw new AssertionError("Not implemented for test"); - } - } - - @Override - public MetricsRecordBuilder addRecord(String arg0) { - MockRecord tr = new MockRecord(arg0); - records.add(tr); - return new MockMetricsRecordBuilder(tr); - } - - @Override - public MetricsRecordBuilder addRecord(MetricsInfo arg0) { - MockRecord tr = new MockRecord(arg0.name()); - records.add(tr); - return new MockMetricsRecordBuilder(tr); - } - - /** - * @return A list of all built metrics records. 
- */ - public List getRecords() { - return records; - } - } - /** * Helper to verify the actual value by comparing it with a +/- tolerance of * 10% with the expected value. diff --git llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java index cdf767f1db..e2e0a320ee 100644 --- llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java +++ llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java @@ -102,10 +102,12 @@ import org.apache.tez.common.security.JobTokenIdentifier; import org.apache.tez.common.security.JobTokenSecretManager; import org.apache.tez.dag.api.TezUncheckedException; +import org.apache.tez.dag.api.oldrecords.TaskAttemptState; import org.apache.tez.dag.app.dag.DAG; import org.apache.tez.dag.app.dag.TaskAttempt; import org.apache.tez.dag.app.dag.Vertex; import org.apache.tez.dag.app.dag.impl.Edge; +import org.apache.tez.dag.app.dag.impl.TaskAttemptImpl; import org.apache.tez.dag.records.TezDAGID; import org.apache.tez.dag.records.TezTaskAttemptID; import org.apache.tez.dag.records.TezVertexID; @@ -426,9 +428,10 @@ public LlapTaskSchedulerService(TaskSchedulerContext taskSchedulerContext, Clock this.pauseMonitor = new JvmPauseMonitor(conf); pauseMonitor.start(); String displayName = "LlapTaskSchedulerMetrics-" + MetricsUtils.getHostName(); + int latencyMetricWindowSize = HiveConf.getIntVar(conf, ConfVars.LLAP_LATENCY_METRIC_WINDOW_SIZE); String sessionId = conf.get("llap.daemon.metrics.sessionid"); // TODO: Not sure about the use of this. Should we instead use workerIdentity as sessionId? 
- this.metrics = LlapTaskSchedulerMetrics.create(displayName, sessionId); + this.metrics = LlapTaskSchedulerMetrics.create(displayName, sessionId, latencyMetricWindowSize); } else { this.metrics = null; this.pauseMonitor = null; @@ -1175,6 +1178,9 @@ public boolean deallocateTask( LOG.debug("Processing deallocateTask for task={}, taskSucceeded={}, endReason={}", task, taskSucceeded, endReason); } + if (task instanceof TaskAttemptImpl && metrics != null) { + updateMetrics((TaskAttemptImpl)task); + } boolean isEarlyExit = false; TaskInfo toUpdate = null, taskInfo; writeLock.lock(); // Updating several local structures @@ -3154,4 +3160,22 @@ public void taskInfoUpdated(TezTaskAttemptID attemptId, boolean isGuaranteed) { + attemptId + ", " + newState); sendUpdateMessageAsync(ti, newState); } + + private void updateMetrics(TaskAttemptImpl taskAttempt) { + // Only do it for successful map tasks + if (!TaskAttemptState.SUCCEEDED.equals(taskAttempt.getState()) || !isMapTask(taskAttempt)) { + return; + } + // Check if this task was already assigned to a node + NodeInfo nodeInfo = knownTasks.get(taskAttempt).assignedNode; + if (nodeInfo == null) { + return; + } + + metrics.addTaskLatency(nodeInfo.shortStringBase, taskAttempt.getFinishTime() - taskAttempt.getLaunchTime()); + } + + private boolean isMapTask(TaskAttemptImpl taskAttempt) { + return taskAttempt.getCounters().getGroup("HIVE").findCounter("RECORDS_IN_Map") == null; + } } diff --git llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java index c6b5cc1770..6d0f4013bf 100644 --- llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java +++ llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java @@ -31,10 +31,13 @@ import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName; import static 
org.apache.hadoop.metrics2.impl.MsInfo.SessionId; +import com.google.common.base.MoreObjects; +import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; import org.apache.hadoop.hive.common.JvmMetrics; import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem; import org.apache.hadoop.hive.llap.metrics.MetricsUtils; import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; @@ -46,6 +49,9 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + /** * Metrics about the llap task scheduler. */ @@ -55,6 +61,9 @@ private final JvmMetrics jvmMetrics; private final String sessionId; private final MetricsRegistry registry; + private final int latencyMetricWindowSize; + // Storing latency metrics for every active daemon Map + private Map daemonTaskLatency = new ConcurrentHashMap<>(); private String dagId = null; @Metric MutableGaugeInt numExecutors; @@ -93,19 +102,22 @@ @Metric MutableCounterInt wmGuaranteedCount; - private LlapTaskSchedulerMetrics(String displayName, JvmMetrics jm, String sessionId) { + private LlapTaskSchedulerMetrics(String displayName, JvmMetrics jm, String sessionId, + int latencyMetricWindowSize) { this.name = displayName; this.jvmMetrics = jm; this.sessionId = sessionId; this.registry = new MetricsRegistry("LlapTaskSchedulerMetricsRegistry"); this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId); + this.latencyMetricWindowSize = latencyMetricWindowSize; } - public static LlapTaskSchedulerMetrics create(String displayName, String sessionId) { + public static LlapTaskSchedulerMetrics create(String displayName, String sessionId, + int latencyMetricWindowSize) { MetricsSystem ms = 
LlapMetricsSystem.instance(); JvmMetrics jm = JvmMetrics.create(MetricsUtils.METRICS_PROCESS_NAME, sessionId, ms); return ms.register(displayName, "Llap Task Scheduler Metrics", - new LlapTaskSchedulerMetrics(displayName, jm, sessionId)); + new LlapTaskSchedulerMetrics(displayName, jm, sessionId, latencyMetricWindowSize)); } @Override @@ -254,6 +266,14 @@ public void setWmUnusedGuaranteed(int unusedGuaranteed) { wmUnusedGuaranteedCount.set(unusedGuaranteed); } + public void addTaskLatency(String daemonId, long value) { + daemonTaskLatency.compute(daemonId, (k, v) -> { + v = (v == null ? new DaemonLatencyMetric(daemonId, latencyMetricWindowSize) : v); + v.addValue(value); + return v; + }); + } + public void resetWmMetrics() { wmTotalGuaranteedCount.set(0); wmUnusedGuaranteedCount.set(0); @@ -276,6 +296,43 @@ private void getTaskSchedulerStats(MetricsRecordBuilder rb) { .addCounter(SchedulerPendingPreemptionTaskCount, pendingPreemptionTasksCount.value()) .addCounter(SchedulerPreemptedTaskCount, preemptedTasksCount.value()) .addCounter(SchedulerCompletedDagCount, completedDagcount.value()); + daemonTaskLatency.forEach((k, v) -> rb.addGauge(v, v.getAverage())); + } + + static class DaemonLatencyMetric implements MetricsInfo { + private String name; + private DescriptiveStatistics statistics; + private static final String DESCRIPTION = "Sliding average of task latency / ioMillis"; + + DaemonLatencyMetric(String name, int latencyMetricWindowSize) { + this.name = name; + statistics = new DescriptiveStatistics(latencyMetricWindowSize); + } + + @Override + public String name() { + return this.name; + } + + @Override + public String description() { + return DESCRIPTION; + } + + public void addValue(long value) { + statistics.addValue(value); + } + + public double getAverage() { + return statistics.getSum() / statistics.getN(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name()).add("description", DESCRIPTION) + 
.toString(); + } } public JvmMetrics getJvmMetrics() { diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapTaskSchedulerMetrics.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapTaskSchedulerMetrics.java new file mode 100644 index 0000000000..4991491be4 --- /dev/null +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapTaskSchedulerMetrics.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.llap.tezplugins.metrics; + + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.metrics2.MetricsInfo; +import org.junit.Before; +import org.junit.Test; +import org.apache.hadoop.hive.llap.metrics.MockMetricsCollector; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Test the LlapTaskScheduleMetrics methods. 
+ */ +public class TestLlapTaskSchedulerMetrics { + private LlapTaskSchedulerMetrics metrics = null; + + @Before + public void setUp() { + metrics = LlapTaskSchedulerMetrics.create("TestMetrics", "TestSession", 1024); + } + + /** + * Test that task latency values added for the daemons show up as expected in the collected metrics. + */ + @Test(timeout = 1000000000) + public void testDaemonCount() { + metrics.addTaskLatency("key1", 10); + metrics.addTaskLatency("key2", 20); + metrics.addTaskLatency("key1", 15); + + MockMetricsCollector tmc = null; + Map metricMap = null; + + tmc = new MockMetricsCollector(); + metrics.getMetrics(tmc, true); + metricMap = tmc.getRecords().get(0).getMetrics(); + verifyDaemonMetrics(metricMap, Arrays.asList("key1", "key2")); + } + + private void verifyDaemonMetrics(Map metricsMap, List expectedKeys) { + List foundKeys = new ArrayList<>(expectedKeys.size()); + metricsMap.keySet().forEach(info -> { + if (info instanceof LlapTaskSchedulerMetrics.DaemonLatencyMetric) { + LlapTaskSchedulerMetrics.DaemonLatencyMetric dlm = (LlapTaskSchedulerMetrics.DaemonLatencyMetric)info; + foundKeys.add(dlm.name()); + } + }); + Collections.sort(expectedKeys); + Collections.sort(foundKeys); + assertEquals("Did not find every expected key", expectedKeys, foundKeys); + } +} diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/package-info.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/package-info.java new file mode 100644 index 0000000000..06dba5429d --- /dev/null +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package containing the tests for Llap metrics classes. + */ +package org.apache.hadoop.hive.llap.tezplugins.metrics;