diff --git hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d25d1d9..86e6afe 100644
--- hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -488,13 +488,12 @@
-
+
-
-
-
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index bb7db12..9c8b037 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -90,9 +90,7 @@ public TimelineWriteResponse putEntities(TimelineEntities entities,
}
TimelineCollectorContext context = getTimelineEntityContext();
- return writer.write(context.getClusterId(), context.getUserId(),
- context.getFlowName(), context.getFlowVersion(), context.getFlowRunId(),
- context.getAppId(), entities);
+ return writer.write(context, entities);
}
/**
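Note: a minimal sketch (not part of the patch) of the consolidated call path after this change; writer and entities stand for the collector's writer and the posted entities, and the six-argument TimelineCollectorContext constructor is the one exercised by the tests further down.

    // The former six positional arguments now travel in one context object.
    TimelineCollectorContext context = new TimelineCollectorContext(
        "test_cluster", "test_user", "test_flow", "flow_v1", 1L, "test_app");
    TimelineWriteResponse response = writer.write(context, entities);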
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index 4385bbc..8b6fbca 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
/**
@@ -65,24 +66,24 @@
}
@Override
- public TimelineWriteResponse write(String clusterId, String userId,
- String flowName, String flowVersion, long flowRunId, String appId,
+ public TimelineWriteResponse write(TimelineCollectorContext context,
TimelineEntities entities) throws IOException {
TimelineWriteResponse response = new TimelineWriteResponse();
for (TimelineEntity entity : entities.getEntities()) {
- write(clusterId, userId, flowName, flowVersion, flowRunId, appId, entity,
- response);
+ write(context, entity, response);
}
return response;
}
- private synchronized void write(String clusterId, String userId, String flowName,
- String flowVersion, long flowRun, String appId, TimelineEntity entity,
+ private synchronized void write(TimelineCollectorContext context,
+ TimelineEntity entity,
TimelineWriteResponse response) throws IOException {
PrintWriter out = null;
try {
- String dir = mkdirs(outputRoot, ENTITIES_DIR, clusterId, userId,
- escape(flowName), escape(flowVersion), String.valueOf(flowRun), appId,
+ String dir = mkdirs(outputRoot, ENTITIES_DIR, context.getClusterId(),
+ context.getUserId(), escape(context.getFlowName()),
+ escape(context.getFlowVersion()),
+ String.valueOf(context.getFlowRunId()), context.getAppId(),
entity.getType());
String fileName = dir + entity.getId() +
TIMELINE_SERVICE_STORAGE_EXTENSION;
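Note: the on-disk layout is unchanged by this refactoring; a sketch of the expected location for the context used in the file system writer test below (fsi is assumed to be the initialized FileSystemTimelineWriterImpl from that test).

    TimelineCollectorContext context = new TimelineCollectorContext(
        "cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
        "app_id");
    // One file per entity is written under:
    //   <outputRoot>/entities/<cluster>/<user>/<flow>/<version>/<run>/<app>/<type>/
    String expectedDir = fsi.getOutputRoot()
        + "/entities/cluster_id/user_id/flow_name/flow_version/12345678/app_id/";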
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index cd2e76e..fd6df06 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
@@ -82,11 +83,16 @@ protected void serviceInit(Configuration conf) throws Exception {
* Stores the entire information in TimelineEntities to the timeline store.
*/
@Override
- public TimelineWriteResponse write(String clusterId, String userId,
- String flowName, String flowVersion, long flowRunId, String appId,
+ public TimelineWriteResponse write(TimelineCollectorContext context,
TimelineEntities data) throws IOException {
TimelineWriteResponse putStatus = new TimelineWriteResponse();
+ String clusterId = context.getClusterId();
+ String userId = context.getUserId();
+ String flowName = context.getFlowName();
+ long flowRunId = context.getFlowRunId();
+ String appId = context.getAppId();
+ String flowVersion = context.getFlowVersion();
for (TimelineEntity te : data.getEntities()) {
// a set can have at most 1 null
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java
new file mode 100644
index 0000000..f7c5e66
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.OfflineAggregationInfo;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+@Private
+@Unstable
+public class PhoenixOfflineAggregationWriterImpl extends AbstractService
+ implements TimelineWriter {
+
+ public static final String TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR
+ = YarnConfiguration.TIMELINE_SERVICE_PREFIX
+ + "writer.phoenix.connectionString";
+
+ public static final String TIMELINE_SERVICE_PHEONIX_STORAGE_CONN_STR_DEFAULT
+ = "jdbc:phoenix:localhost:2181:/hbase";
+
+ private static final Log LOG
+ = LogFactory.getLog(PhoenixOfflineAggregationWriterImpl.class);
+ private static final String PHOENIX_COL_FAMILY_PLACE_HOLDER
+ = "timeline_cf_placeholder";
+
+ /** Default Phoenix JDBC driver name */
+ private static final String DRIVER_CLASS_NAME
+ = "org.apache.phoenix.jdbc.PhoenixDriver";
+
+ /** Default Phoenix timeline config column family */
+ private static final String METRIC_COLUMN_FAMILY = "m.";
+ /** Default Phoenix timeline info column family */
+ private static final String INFO_COLUMN_FAMILY = "i.";
+ /** Default separator for Phoenix storage */
+ private static final String AGGREGATION_STORAGE_SEPARATOR = ";";
+
+ /** Connection string to the deployed Phoenix cluster */
+ @VisibleForTesting
+ String connString = null;
+ @VisibleForTesting
+ Properties connProperties = new Properties();
+
+ PhoenixOfflineAggregationWriterImpl() {
+ super((PhoenixOfflineAggregationWriterImpl.class.getName()));
+ }
+
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ // Read the Phoenix connection string from the config, falling back to the default if it is not overridden.
+ connString =
+ conf.get(TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR,
+ TIMELINE_SERVICE_PHEONIX_STORAGE_CONN_STR_DEFAULT);
+ createTables();
+ super.init(conf);
+ }
+
+ @Override
+ protected void serviceStop() throws Exception {
+ super.serviceStop();
+ }
+
+ @Override
+ public TimelineWriteResponse write(TimelineCollectorContext context,
+ TimelineEntities entities) throws IOException {
+ if (context.getFlowName() != null) {
+ // flow level aggregation
+ return writeAggregatedEntity(context, entities,
+ OfflineAggregationInfo.FLOW_AGGREGATION);
+ } else if (context.getUserId() != null) {
+ // user level aggregation
+ return writeAggregatedEntity(context, entities,
+ OfflineAggregationInfo.USER_AGGREGATION);
+ }
+ TimelineWriteResponse response = new TimelineWriteResponse();
+ response.addError(new TimelineWriteResponse.TimelineWriteError());
+ return response;
+ }
+
+ @Override public TimelineWriteResponse aggregate(TimelineEntity data,
+ TimelineAggregationTrack track) throws IOException {
+ return null;
+ }
+
+ @Override public void flush() throws IOException {
+ return;
+ }
+
+ @Private
+ @VisibleForTesting
+ TimelineWriteResponse writeAggregatedEntity(TimelineCollectorContext context,
+ TimelineEntities entities, OfflineAggregationInfo aggregationInfo)
+ throws IOException {
+ TimelineWriteResponse response = new TimelineWriteResponse();
+ String sql = "UPSERT INTO " + aggregationInfo.getTableName()
+ + " (" + StringUtils.join(aggregationInfo.getPrimaryKeyList(), ",")
+ + ", created_time, modified_time, metric_names, info_keys) "
+ + "VALUES ("
+ + StringUtils.repeat("?,", aggregationInfo.getPrimaryKeyList().length)
+ + "?, ?, ?, ?)";
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("TimelineEntity write SQL: " + sql);
+ }
+
+ try (Connection conn = getConnection();
+ PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (TimelineEntity entity : entities.getEntities()) {
+ HashMap<String, TimelineMetric> formattedMetrics = new HashMap<>();
+ if (entity.getMetrics() != null) {
+ for (TimelineMetric m : entity.getMetrics()) {
+ formattedMetrics.put(m.getId(), m);
+ }
+ }
+ int idx = aggregationInfo.setStringsForPrimaryKey(ps, context, null,
+ 1);
+ ps.setLong(idx++, entity.getCreatedTime());
+ ps.setLong(idx++, entity.getModifiedTime());
+ ps.setString(idx++, StringUtils.join(formattedMetrics.keySet().toArray(),
+ AGGREGATION_STORAGE_SEPARATOR));
+ ps.setString(idx++, StringUtils.join(entity.getInfo().keySet().toArray(),
+ AGGREGATION_STORAGE_SEPARATOR));
+ ps.execute();
+
+ storeEntityVariableLengthFields(entity, formattedMetrics, context,
+ conn, aggregationInfo);
+
+ conn.commit();
+ }
+ } catch (SQLException se) {
+ LOG.error("Failed to add entity to Phoenix " + se.getMessage());
+ throw new IOException(se);
+ } catch (Exception e) {
+ LOG.error("Exception on getting connection: " + e.getMessage());
+ throw new IOException(e);
+ }
+ return response;
+ }
+
+ // Utility functions
+ @Private
+ @VisibleForTesting
+ Connection getConnection() throws IOException {
+ Connection conn;
+ try {
+ Class.forName(DRIVER_CLASS_NAME);
+ conn = DriverManager.getConnection(connString, connProperties);
+ conn.setAutoCommit(false);
+ } catch (SQLException se) {
+ LOG.error("Failed to connect to phoenix server! "
+ + se.getLocalizedMessage());
+ throw new IOException(se);
+ } catch (ClassNotFoundException e) {
+ LOG.error("Class not found! " + e.getLocalizedMessage());
+ throw new IOException(e);
+ }
+ return conn;
+ }
+
+ private void createTables() throws Exception {
+ // Create tables if necessary
+ try (Connection conn = getConnection();
+ Statement stmt = conn.createStatement()) {
+ // Table schema defined as in YARN-3817.
+ String sql = "CREATE TABLE IF NOT EXISTS "
+ + OfflineAggregationInfo.FLOW_AGGREGATION_TABLE_NAME
+ + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
+ + "flow_name VARCHAR NOT NULL, "
+ + "created_time UNSIGNED_LONG, modified_time UNSIGNED_LONG, "
+ + INFO_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY, "
+ + METRIC_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY, "
+ + "metric_names VARCHAR, info_keys VARCHAR "
+ + "CONSTRAINT pk PRIMARY KEY("
+ + "user, cluster, flow_name))";
+ stmt.executeUpdate(sql);
+ sql = "CREATE TABLE IF NOT EXISTS "
+ + OfflineAggregationInfo.USER_AGGREGATION_TABLE_NAME
+ + "(user VARCHAR NOT NULL, "
+ + "created_time UNSIGNED_LONG, modified_time UNSIGNED_LONG, "
+ + INFO_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY, "
+ + METRIC_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY, "
+ + "metric_names VARCHAR, info_keys VARCHAR "
+ + "CONSTRAINT pk PRIMARY KEY(user))";
+ stmt.executeUpdate(sql);
+ conn.commit();
+ } catch (SQLException se) {
+ LOG.error("Failed in init data " + se.getLocalizedMessage());
+ throw se;
+ }
+ return;
+ }
+
+ private static class DynamicColumns<K> {
+ static final String COLUMN_FAMILY_TYPE_BYTES = " VARBINARY";
+ static final String COLUMN_FAMILY_TYPE_STRING = " VARCHAR";
+ String columnFamilyPrefix;
+ String type;
+ Set<K> columns;
+
+ public DynamicColumns(String columnFamilyPrefix, String type,
+ Set<K> keyValues) {
+ this.columnFamilyPrefix = columnFamilyPrefix;
+ this.columns = keyValues;
+ this.type = type;
+ }
+ }
+
+ private static <K> StringBuilder appendColumnsSQL(
+ StringBuilder colNames, DynamicColumns<K> cfInfo) {
+ // Prepare the sql template by iterating through all keys
+ for (K key : cfInfo.columns) {
+ colNames.append(",").append(cfInfo.columnFamilyPrefix)
+ .append(key.toString()).append(cfInfo.type);
+ }
+ return colNames;
+ }
+
+ private static <K, V> int setValuesForColumnFamily(
+ PreparedStatement ps, Map<K, V> keyValues, int startPos,
+ boolean converToBytes) throws SQLException {
+ int idx = startPos;
+ for (Map.Entry<K, V> entry : keyValues.entrySet()) {
+ V value = entry.getValue();
+ if (value instanceof Collection) {
+ ps.setString(idx++, StringUtils.join(
+ (Collection) value, AGGREGATION_STORAGE_SEPARATOR));
+ } else {
+ if (converToBytes) {
+ try {
+ ps.setBytes(idx++, GenericObjectMapper.write(entry.getValue()));
+ } catch (IOException ie) {
+ LOG.error("Exception in converting values into bytes "
+ + ie.getMessage());
+ throw new SQLException(ie);
+ }
+ } else {
+ ps.setString(idx++, value.toString());
+ }
+ }
+ }
+ return idx;
+ }
+
+ private static <K, V> int setBytesForColumnFamily(
+ PreparedStatement ps, Map<K, V> keyValues, int startPos)
+ throws SQLException {
+ return setValuesForColumnFamily(ps, keyValues, startPos, true);
+ }
+
+ private static <K, V> int setStringsForColumnFamily(
+ PreparedStatement ps, Map<K, V> keyValues, int startPos)
+ throws SQLException {
+ return setValuesForColumnFamily(ps, keyValues, startPos, false);
+ }
+
+ private static void storeEntityVariableLengthFields(TimelineEntity entity,
+ Map<String, TimelineMetric> formattedMetrics,
+ TimelineCollectorContext context, Connection conn,
+ OfflineAggregationInfo aggregationInfo) throws SQLException {
+ int numPlaceholders = 0;
+ StringBuilder columnDefs = new StringBuilder(
+ StringUtils.join(aggregationInfo.getPrimaryKeyList(), ","));
+ if (entity.getInfo() != null) {
+ Set<String> keySet = entity.getInfo().keySet();
+ appendColumnsSQL(columnDefs, new DynamicColumns<>(
+ INFO_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_BYTES,
+ keySet));
+ numPlaceholders += keySet.size();
+ }
+ if (formattedMetrics != null && formattedMetrics.size() > 0) {
+ appendColumnsSQL(columnDefs, new DynamicColumns<>(
+ METRIC_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_BYTES,
+ formattedMetrics.keySet()));
+ numPlaceholders += formattedMetrics.keySet().size();
+ }
+ if (numPlaceholders == 0) {
+ return;
+ }
+ StringBuilder placeholders = new StringBuilder();
+ placeholders.append(
+ StringUtils.repeat("?,", aggregationInfo.getPrimaryKeyList().length));
+ // numPlaceholders >= 1 now
+ placeholders.append("?")
+ .append(StringUtils.repeat(",?", numPlaceholders - 1));
+ String sqlVariableLengthFields = new StringBuilder("UPSERT INTO ")
+ .append(aggregationInfo.getTableName()).append(" (").append(columnDefs)
+ .append(") VALUES(").append(placeholders).append(")").toString();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SQL statement for variable length fields: "
+ + sqlVariableLengthFields);
+ }
+ // Use try with resource statement for the prepared statement
+ try (PreparedStatement psVariableLengthFields =
+ conn.prepareStatement(sqlVariableLengthFields)) {
+ int idx = aggregationInfo.setStringsForPrimaryKey(
+ psVariableLengthFields, context, null, 1);
+ if (entity.getInfo() != null) {
+ idx = setBytesForColumnFamily(
+ psVariableLengthFields, entity.getInfo(), idx);
+ }
+ if (formattedMetrics != null && formattedMetrics.size() > 0) {
+ idx = setBytesForColumnFamily(
+ psVariableLengthFields, formattedMetrics, idx);
+ }
+ psVariableLengthFields.execute();
+ }
+ }
+
+ // WARNING: This method will permanently drop a table!
+ @Private
+ @VisibleForTesting
+ void dropTable(String tableName) throws Exception {
+ try (Connection conn = getConnection();
+ Statement stmt = conn.createStatement()) {
+ String sql = "DROP TABLE " + tableName;
+ stmt.executeUpdate(sql);
+ } catch (SQLException se) {
+ LOG.error("Failed in dropping entity table " + se.getLocalizedMessage());
+ throw se;
+ }
+ }
+}
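Note: a sketch of how the new writer routes entities to an aggregation table based on the context; writer and entities are assumed to be an initialized PhoenixOfflineAggregationWriterImpl and a TimelineEntities batch, and the contexts mirror the one used in the new test.

    // flowName set -> flow-level aggregation; flowName null but userId set -> user-level.
    TimelineCollectorContext flowCtx = new TimelineCollectorContext(
        "cluster_1", "user1", "testFlow", null, 0, null);
    TimelineCollectorContext userCtx = new TimelineCollectorContext(
        "cluster_1", "user1", null, null, 0, null);
    writer.write(flowCtx, entities);   // stored via OfflineAggregationInfo.FLOW_AGGREGATION
    writer.write(userCtx, entities);   // stored via OfflineAggregationInfo.USER_AGGREGATION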
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixTimelineWriterImpl.java
deleted file mode 100644
index 381ff17..0000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixTimelineWriterImpl.java
+++ /dev/null
@@ -1,530 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
-import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-@Private
-@Unstable
-public class PhoenixTimelineWriterImpl extends AbstractService
- implements TimelineWriter {
-
- public static final String TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR
- = YarnConfiguration.TIMELINE_SERVICE_PREFIX
- + "writer.phoenix.connectionString";
-
- public static final String TIMELINE_SERVICE_PHEONIX_STORAGE_CONN_STR_DEFAULT
- = "jdbc:phoenix:localhost:2181:/hbase";
-
- private static final Log LOG
- = LogFactory.getLog(PhoenixTimelineWriterImpl.class);
- private static final String PHOENIX_COL_FAMILY_PLACE_HOLDER
- = "timeline_cf_placeholder";
- // These lists are not taking effects in table creations.
- private static final String[] PHOENIX_STORAGE_PK_LIST
- = {"cluster", "user", "flow_name", "flow_version", "flow_run", "app_id",
- "type", "entity_id"};
- private static final String[] TIMELINE_EVENT_EXTRA_PK_LIST =
- {"timestamp", "event_id"};
- private static final String[] TIMELINE_METRIC_EXTRA_PK_LIST =
- {"metric_id"};
- /** Default Phoenix JDBC driver name */
- private static final String DRIVER_CLASS_NAME
- = "org.apache.phoenix.jdbc.PhoenixDriver";
-
- /** Default Phoenix timeline entity table name */
- @VisibleForTesting
- static final String ENTITY_TABLE_NAME = "timeline_entity";
- /** Default Phoenix event table name */
- @VisibleForTesting
- static final String EVENT_TABLE_NAME = "timeline_event";
- /** Default Phoenix metric table name */
- @VisibleForTesting
- static final String METRIC_TABLE_NAME = "metric_singledata";
-
- /** Default Phoenix timeline config column family */
- private static final String CONFIG_COLUMN_FAMILY = "c.";
- /** Default Phoenix timeline info column family */
- private static final String INFO_COLUMN_FAMILY = "i.";
- /** Default Phoenix event info column family */
- private static final String EVENT_INFO_COLUMN_FAMILY = "ei.";
- /** Default Phoenix isRelatedTo column family */
- private static final String IS_RELATED_TO_FAMILY = "ir.";
- /** Default Phoenix relatesTo column family */
- private static final String RELATES_TO_FAMILY = "rt.";
- /** Default separator for Phoenix storage */
- private static final String PHOENIX_STORAGE_SEPARATOR = ";";
-
- /** Connection string to the deployed Phoenix cluster */
- @VisibleForTesting
- String connString = null;
- @VisibleForTesting
- Properties connProperties = new Properties();
-
- PhoenixTimelineWriterImpl() {
- super((PhoenixTimelineWriterImpl.class.getName()));
- }
-
- @Override
- protected void serviceInit(Configuration conf) throws Exception {
- // so check it here and only read in the config if it's not overridden.
- connString =
- conf.get(TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR,
- TIMELINE_SERVICE_PHEONIX_STORAGE_CONN_STR_DEFAULT);
- createTables();
- super.init(conf);
- }
-
- @Override
- protected void serviceStop() throws Exception {
- super.serviceStop();
- }
-
- @Override
- public TimelineWriteResponse write(String clusterId, String userId,
- String flowName, String flowVersion, long flowRunId, String appId,
- TimelineEntities entities) throws IOException {
- TimelineWriteResponse response = new TimelineWriteResponse();
- TimelineCollectorContext currContext = new TimelineCollectorContext(
- clusterId, userId, flowName, flowVersion, flowRunId, appId);
- String sql = "UPSERT INTO " + ENTITY_TABLE_NAME
- + " (" + StringUtils.join(PHOENIX_STORAGE_PK_LIST, ",")
- + ", creation_time, modified_time, configs) "
- + "VALUES (" + StringUtils.repeat("?,", PHOENIX_STORAGE_PK_LIST.length)
- + "?, ?, ?)";
- if (LOG.isDebugEnabled()) {
- LOG.debug("TimelineEntity write SQL: " + sql);
- }
-
- try (Connection conn = getConnection();
- PreparedStatement ps = conn.prepareStatement(sql)) {
- for (TimelineEntity entity : entities.getEntities()) {
- int idx = setStringsForPrimaryKey(ps, currContext, entity, 1);
- ps.setLong(idx++, entity.getCreatedTime());
- ps.setLong(idx++, entity.getModifiedTime());
- String configKeys = StringUtils.join(
- entity.getConfigs().keySet(), PHOENIX_STORAGE_SEPARATOR);
- ps.setString(idx++, configKeys);
- ps.execute();
-
- storeEntityVariableLengthFields(entity, currContext, conn);
- storeEvents(entity, currContext, conn);
- storeMetrics(entity, currContext, conn);
-
- conn.commit();
- }
- } catch (SQLException se) {
- LOG.error("Failed to add entity to Phoenix " + se.getMessage());
- throw new IOException(se);
- } catch (Exception e) {
- LOG.error("Exception on getting connection: " + e.getMessage());
- throw new IOException(e);
- }
- return response;
- }
-
- /**
- * Aggregates the entity information to the timeline store based on which
- * track this entity is to be rolled up to The tracks along which aggregations
- * are to be done are given by {@link TimelineAggregationTrack}
- *
- * Any errors occurring for individual write request objects will be reported
- * in the response.
- *
- * @param data
- * a {@link TimelineEntity} object
- * a {@link TimelineAggregationTrack} enum value
- * @return a {@link TimelineWriteResponse} object.
- * @throws IOException
- */
- @Override
- public TimelineWriteResponse aggregate(TimelineEntity data,
- TimelineAggregationTrack track) throws IOException {
- return null;
-
- }
-
- @Override
- public void flush() throws IOException {
- // currently no-op
- }
-
- // Utility functions
- @Private
- @VisibleForTesting
- Connection getConnection() throws IOException {
- Connection conn;
- try {
- Class.forName(DRIVER_CLASS_NAME);
- conn = DriverManager.getConnection(connString, connProperties);
- conn.setAutoCommit(false);
- } catch (SQLException se) {
- LOG.error("Failed to connect to phoenix server! "
- + se.getLocalizedMessage());
- throw new IOException(se);
- } catch (ClassNotFoundException e) {
- LOG.error("Class not found! " + e.getLocalizedMessage());
- throw new IOException(e);
- }
- return conn;
- }
-
- private void createTables() throws Exception {
- // Create tables if necessary
- try (Connection conn = getConnection();
- Statement stmt = conn.createStatement()) {
- // Table schema defined as in YARN-3134.
- String sql = "CREATE TABLE IF NOT EXISTS " + ENTITY_TABLE_NAME
- + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
- + "flow_name VARCHAR NOT NULL, flow_version VARCHAR NOT NULL, "
- + "flow_run UNSIGNED_LONG NOT NULL, "
- + "app_id VARCHAR NOT NULL, type VARCHAR NOT NULL, "
- + "entity_id VARCHAR NOT NULL, "
- + "creation_time UNSIGNED_LONG, modified_time UNSIGNED_LONG, "
- + "configs VARCHAR, "
- + CONFIG_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARCHAR, "
- + INFO_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY, "
- + IS_RELATED_TO_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARCHAR, "
- + RELATES_TO_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARCHAR "
- + "CONSTRAINT pk PRIMARY KEY("
- + "user, cluster, flow_name, flow_version, flow_run DESC, app_id, "
- + "type, entity_id))";
- stmt.executeUpdate(sql);
- sql = "CREATE TABLE IF NOT EXISTS " + EVENT_TABLE_NAME
- + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
- + "flow_name VARCHAR NOT NULL, flow_version VARCHAR NOT NULL, "
- + "flow_run UNSIGNED_LONG NOT NULL, "
- + "app_id VARCHAR NOT NULL, type VARCHAR NOT NULL, "
- + "entity_id VARCHAR NOT NULL, "
- + "timestamp UNSIGNED_LONG NOT NULL, event_id VARCHAR NOT NULL, "
- + EVENT_INFO_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER + " VARBINARY "
- + "CONSTRAINT pk PRIMARY KEY("
- + "user, cluster, flow_name, flow_version, flow_run DESC, app_id, "
- + "type, entity_id, timestamp DESC, event_id))";
- stmt.executeUpdate(sql);
- sql = "CREATE TABLE IF NOT EXISTS " + METRIC_TABLE_NAME
- + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
- + "flow_name VARCHAR NOT NULL, flow_version VARCHAR NOT NULL, "
- + "flow_run UNSIGNED_LONG NOT NULL, "
- + "app_id VARCHAR NOT NULL, type VARCHAR NOT NULL, "
- + "entity_id VARCHAR NOT NULL, "
- + "metric_id VARCHAR NOT NULL, "
- + "singledata VARBINARY, "
- + "time UNSIGNED_LONG "
- + "CONSTRAINT pk PRIMARY KEY("
- + "user, cluster, flow_name, flow_version, flow_run DESC, app_id, "
- + "type, entity_id, metric_id))";
- stmt.executeUpdate(sql);
- conn.commit();
- } catch (SQLException se) {
- LOG.error("Failed in init data " + se.getLocalizedMessage());
- throw se;
- }
- return;
- }
-
- private static class DynamicColumns<K> {
- static final String COLUMN_FAMILY_TYPE_BYTES = " VARBINARY";
- static final String COLUMN_FAMILY_TYPE_STRING = " VARCHAR";
- String columnFamilyPrefix;
- String type;
- Set<K> columns;
-
- public DynamicColumns(String columnFamilyPrefix, String type,
- Set<K> keyValues) {
- this.columnFamilyPrefix = columnFamilyPrefix;
- this.columns = keyValues;
- this.type = type;
- }
- }
-
- private static <K> StringBuilder appendColumnsSQL(
- StringBuilder colNames, DynamicColumns<K> cfInfo) {
- // Prepare the sql template by iterating through all keys
- for (K key : cfInfo.columns) {
- colNames.append(",").append(cfInfo.columnFamilyPrefix)
- .append(key.toString()).append(cfInfo.type);
- }
- return colNames;
- }
-
- private static <K, V> int setValuesForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos,
- boolean converToBytes) throws SQLException {
- int idx = startPos;
- for (Map.Entry<K, V> entry : keyValues.entrySet()) {
- V value = entry.getValue();
- if (value instanceof Collection) {
- ps.setString(idx++, StringUtils.join(
- (Collection) value, PHOENIX_STORAGE_SEPARATOR));
- } else {
- if (converToBytes) {
- try {
- ps.setBytes(idx++, GenericObjectMapper.write(entry.getValue()));
- } catch (IOException ie) {
- LOG.error("Exception in converting values into bytes "
- + ie.getMessage());
- throw new SQLException(ie);
- }
- } else {
- ps.setString(idx++, value.toString());
- }
- }
- }
- return idx;
- }
-
- private static <K, V> int setBytesForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos)
- throws SQLException {
- return setValuesForColumnFamily(ps, keyValues, startPos, true);
- }
-
- private static <K, V> int setStringsForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos)
- throws SQLException {
- return setValuesForColumnFamily(ps, keyValues, startPos, false);
- }
-
- private static int setStringsForPrimaryKey(PreparedStatement ps,
- TimelineCollectorContext context, TimelineEntity entity, int startPos)
- throws SQLException {
- int idx = startPos;
- ps.setString(idx++, context.getClusterId());
- ps.setString(idx++, context.getUserId());
- ps.setString(idx++,
- context.getFlowName());
- ps.setString(idx++, context.getFlowVersion());
- ps.setLong(idx++, context.getFlowRunId());
- ps.setString(idx++, context.getAppId());
- ps.setString(idx++, entity.getType());
- ps.setString(idx++, entity.getId());
- return idx;
- }
-
- private static void storeEntityVariableLengthFields(TimelineEntity entity,
- TimelineCollectorContext context, Connection conn) throws SQLException {
- int numPlaceholders = 0;
- StringBuilder columnDefs = new StringBuilder(
- StringUtils.join(PHOENIX_STORAGE_PK_LIST, ","));
- if (entity.getConfigs() != null) {
- Set<String> keySet = entity.getConfigs().keySet();
- appendColumnsSQL(columnDefs, new DynamicColumns<>(
- CONFIG_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_STRING,
- keySet));
- numPlaceholders += keySet.size();
- }
- if (entity.getInfo() != null) {
- Set<String> keySet = entity.getInfo().keySet();
- appendColumnsSQL(columnDefs, new DynamicColumns<>(
- INFO_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_BYTES,
- keySet));
- numPlaceholders += keySet.size();
- }
- if (entity.getIsRelatedToEntities() != null) {
- Set<String> keySet = entity.getIsRelatedToEntities().keySet();
- appendColumnsSQL(columnDefs, new DynamicColumns<>(
- IS_RELATED_TO_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_STRING,
- keySet));
- numPlaceholders += keySet.size();
- }
- if (entity.getRelatesToEntities() != null) {
- Set<String> keySet = entity.getRelatesToEntities().keySet();
- appendColumnsSQL(columnDefs, new DynamicColumns<>(
- RELATES_TO_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_STRING,
- keySet));
- numPlaceholders += keySet.size();
- }
- if (numPlaceholders == 0) {
- return;
- }
- StringBuilder placeholders = new StringBuilder();
- placeholders.append(
- StringUtils.repeat("?,", PHOENIX_STORAGE_PK_LIST.length));
- // numPlaceholders >= 1 now
- placeholders.append("?")
- .append(StringUtils.repeat(",?", numPlaceholders - 1));
- String sqlVariableLengthFields = new StringBuilder("UPSERT INTO ")
- .append(ENTITY_TABLE_NAME).append(" (").append(columnDefs)
- .append(") VALUES(").append(placeholders).append(")").toString();
- if (LOG.isDebugEnabled()) {
- LOG.debug("SQL statement for variable length fields: "
- + sqlVariableLengthFields);
- }
- // Use try with resource statement for the prepared statement
- try (PreparedStatement psVariableLengthFields =
- conn.prepareStatement(sqlVariableLengthFields)) {
- int idx = setStringsForPrimaryKey(
- psVariableLengthFields, context, entity, 1);
- if (entity.getConfigs() != null) {
- idx = setStringsForColumnFamily(
- psVariableLengthFields, entity.getConfigs(), idx);
- }
- if (entity.getInfo() != null) {
- idx = setBytesForColumnFamily(
- psVariableLengthFields, entity.getInfo(), idx);
- }
- if (entity.getIsRelatedToEntities() != null) {
- idx = setStringsForColumnFamily(
- psVariableLengthFields, entity.getIsRelatedToEntities(), idx);
- }
- if (entity.getRelatesToEntities() != null) {
- idx = setStringsForColumnFamily(
- psVariableLengthFields, entity.getRelatesToEntities(), idx);
- }
- psVariableLengthFields.execute();
- }
- }
-
- private static void storeMetrics(TimelineEntity entity,
- TimelineCollectorContext context, Connection conn) throws SQLException {
- if (entity.getMetrics() == null) {
- return;
- }
- Set<TimelineMetric> metrics = entity.getMetrics();
- for (TimelineMetric metric : metrics) {
- StringBuilder sqlColumns = new StringBuilder(
- StringUtils.join(PHOENIX_STORAGE_PK_LIST, ","));
- sqlColumns.append(",")
- .append(StringUtils.join(TIMELINE_METRIC_EXTRA_PK_LIST, ","));
- sqlColumns.append(",").append("singledata, time");
- StringBuilder placeholders = new StringBuilder();
- placeholders.append(
- StringUtils.repeat("?,", PHOENIX_STORAGE_PK_LIST.length))
- .append(StringUtils.repeat("?,", TIMELINE_METRIC_EXTRA_PK_LIST.length));
- placeholders.append("?, ?");
- String sqlMetric = new StringBuilder("UPSERT INTO ")
- .append(METRIC_TABLE_NAME).append(" (").append(sqlColumns)
- .append(") VALUES(").append(placeholders).append(")").toString();
- if (LOG.isDebugEnabled()) {
- LOG.debug("SQL statement for metric: " + sqlMetric);
- }
- try (PreparedStatement psMetrics = conn.prepareStatement(sqlMetric)) {
- if (metric.getType().equals(TimelineMetric.Type.TIME_SERIES)) {
- LOG.warn("The incoming timeline metric contains time series data, "
- + "which is currently not supported by Phoenix storage. "
- + "Time series will be truncated. ");
- }
- int idx = setStringsForPrimaryKey(psMetrics, context, entity, 1);
- psMetrics.setString(idx++, metric.getId());
- Iterator<Map.Entry<Long, Number>> currNumIter =
- metric.getValues().entrySet().iterator();
- if (currNumIter.hasNext()) {
- // TODO: support time series storage
- Map.Entry<Long, Number> currEntry = currNumIter.next();
- psMetrics.setBytes(idx++,
- GenericObjectMapper.write(currEntry.getValue()));
- psMetrics.setLong(idx++, currEntry.getKey());
- } else {
- psMetrics.setBytes(idx++, GenericObjectMapper.write(null));
- LOG.warn("The incoming metric contains an empty value set. ");
- }
- psMetrics.execute();
- } catch (IOException ie) {
- LOG.error("Exception on converting single data to bytes: "
- + ie.getMessage());
- throw new SQLException(ie);
- }
- }
- }
-
- private static void storeEvents(TimelineEntity entity,
- TimelineCollectorContext context, Connection conn) throws SQLException {
- if (entity.getEvents() == null) {
- return;
- }
- Set<TimelineEvent> events = entity.getEvents();
- for (TimelineEvent event : events) {
- // We need this number to check if the incoming event's info field is empty
- int numPlaceholders = 0;
- StringBuilder sqlColumns = new StringBuilder(
- StringUtils.join(PHOENIX_STORAGE_PK_LIST, ","));
- sqlColumns.append(",")
- .append(StringUtils.join(TIMELINE_EVENT_EXTRA_PK_LIST, ","));
- appendColumnsSQL(sqlColumns, new DynamicColumns<>(
- EVENT_INFO_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_BYTES,
- event.getInfo().keySet()));
- numPlaceholders += event.getInfo().keySet().size();
- if (numPlaceholders == 0) {
- continue;
- }
- StringBuilder placeholders = new StringBuilder();
- placeholders.append(
- StringUtils.repeat("?,", PHOENIX_STORAGE_PK_LIST.length))
- .append(StringUtils.repeat("?,", TIMELINE_EVENT_EXTRA_PK_LIST.length));
- // numPlaceholders >= 1 now
- placeholders.append("?")
- .append(StringUtils.repeat(",?", numPlaceholders - 1));
- String sqlEvents = new StringBuilder("UPSERT INTO ")
- .append(EVENT_TABLE_NAME).append(" (").append(sqlColumns)
- .append(") VALUES(").append(placeholders).append(")").toString();
- if (LOG.isDebugEnabled()) {
- LOG.debug("SQL statement for events: " + sqlEvents);
- }
- try (PreparedStatement psEvent = conn.prepareStatement(sqlEvents)) {
- int idx = setStringsForPrimaryKey(psEvent, context, entity, 1);
- psEvent.setLong(idx++, event.getTimestamp());
- psEvent.setString(idx++, event.getId());
- setBytesForColumnFamily(psEvent, event.getInfo(), idx);
- psEvent.execute();
- }
- }
- }
-
- // WARNING: This method will permanently drop a table!
- @Private
- @VisibleForTesting
- void dropTable(String tableName) throws Exception {
- try (Connection conn = getConnection();
- Statement stmt = conn.createStatement()) {
- String sql = "DROP TABLE " + tableName;
- stmt.executeUpdate(sql);
- } catch (SQLException se) {
- LOG.error("Failed in dropping entity table " + se.getLocalizedMessage());
- throw se;
- }
- }
-}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
index 50136de..d14175d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
/**
* This interface is for storing application timeline information.
@@ -38,19 +39,14 @@
* timeline store. Any errors occurring for individual write request objects
* will be reported in the response.
*
- * @param clusterId context cluster ID
- * @param userId context user ID
- * @param flowName context flow name
- * @param flowVersion context flow version
- * @param flowRunId
- * @param appId context app ID
+ * @param context context information (cluster, user, flow, and app info), a
+ * {@link TimelineCollectorContext} object
* @param data
* a {@link TimelineEntities} object.
* @return a {@link TimelineWriteResponse} object.
* @throws IOException
*/
- TimelineWriteResponse write(String clusterId, String userId,
- String flowName, String flowVersion, long flowRunId, String appId,
+ TimelineWriteResponse write(TimelineCollectorContext context,
TimelineEntities data) throws IOException;
/**
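Note: a minimal sketch of an implementation against the revised interface, modeled on the writers in this patch (extends AbstractService and implements the three methods visible here); the class name is hypothetical and the imports are the ones those writers already use.

    class NoOpTimelineWriter extends AbstractService implements TimelineWriter {
      NoOpTimelineWriter() {
        super(NoOpTimelineWriter.class.getName());
      }
      @Override
      public TimelineWriteResponse write(TimelineCollectorContext context,
          TimelineEntities data) throws IOException {
        return new TimelineWriteResponse();   // accept and drop everything
      }
      @Override
      public TimelineWriteResponse aggregate(TimelineEntity data,
          TimelineAggregationTrack track) throws IOException {
        return null;   // same no-op behavior as the Phoenix writers
      }
      @Override
      public void flush() throws IOException {
        // nothing buffered
      }
    }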
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java
new file mode 100644
index 0000000..a851fcb
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public final class OfflineAggregationInfo {
+ /**
+ * Default flow level aggregation table name
+ */
+ @VisibleForTesting
+ public static final String FLOW_AGGREGATION_TABLE_NAME
+ = "yarn_timeline_flow_aggregation";
+ /**
+ * Default user level aggregation table name
+ */
+ public static final String USER_AGGREGATION_TABLE_NAME
+ = "yarn_timeline_user_aggregation";
+
+ // These lists do not take effect in table creation; they only describe the primary key columns.
+ private static final String[] FLOW_AGGREGATION_PK_LIST =
+ { "user", "cluster", "flow_name" };
+ private static final String[] USER_AGGREGATION_PK_LIST = { "user" };
+
+ private String tableName;
+ private String[] primaryKeyList;
+ private PrimaryKeyStringSetter primaryKeyStringSetter;
+
+ private OfflineAggregationInfo(String table, String[] pkList,
+ PrimaryKeyStringSetter formatter) {
+ tableName = table;
+ primaryKeyList = pkList;
+ primaryKeyStringSetter = formatter;
+ }
+
+ private interface PrimaryKeyStringSetter {
+ int setValues(PreparedStatement ps, TimelineCollectorContext context,
+ String[] extraInfo, int startPos) throws SQLException;
+ }
+
+ public String getTableName() {
+ return tableName;
+ }
+
+ public String[] getPrimaryKeyList() {
+ return primaryKeyList.clone();
+ }
+
+ public int setStringsForPrimaryKey(PreparedStatement ps,
+ TimelineCollectorContext context, String[] extraInfo, int startPos)
+ throws SQLException {
+ return primaryKeyStringSetter.setValues(ps, context, extraInfo, startPos);
+ }
+
+ public static final OfflineAggregationInfo FLOW_AGGREGATION =
+ new OfflineAggregationInfo(FLOW_AGGREGATION_TABLE_NAME,
+ FLOW_AGGREGATION_PK_LIST, new PrimaryKeyStringSetter() {
+ @Override
+ public int setValues(PreparedStatement ps,
+ TimelineCollectorContext context, String[] extraInfo, int startPos)
+ throws SQLException {
+ int idx = startPos;
+ ps.setString(idx++, context.getUserId());
+ ps.setString(idx++, context.getClusterId());
+ ps.setString(idx++, context.getFlowName());
+ return idx;
+ }
+ });
+
+ public static final OfflineAggregationInfo USER_AGGREGATION =
+ new OfflineAggregationInfo(USER_AGGREGATION_TABLE_NAME,
+ USER_AGGREGATION_PK_LIST, new PrimaryKeyStringSetter() {
+ @Override
+ public int setValues(PreparedStatement ps,
+ TimelineCollectorContext context, String[] extraInfo, int startPos)
+ throws SQLException {
+ int idx = startPos;
+ ps.setString(idx++, context.getUserId());
+ return idx;
+ }
+ });
+}
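Note: a sketch of how a writer consumes an OfflineAggregationInfo descriptor, mirroring writeAggregatedEntity above; ps and context are assumed to be an open PreparedStatement and a collector context.

    OfflineAggregationInfo info = OfflineAggregationInfo.FLOW_AGGREGATION;
    // Column list for the UPSERT text: "user,cluster,flow_name"
    String pkColumns = StringUtils.join(info.getPrimaryKeyList(), ",");
    // Binds parameters 1..3 of ps from the context and returns the next index (4).
    int nextIdx = info.setStringsForPrimaryKey(ps, context, null, 1);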
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
index 50a9f60..8891a71 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.junit.Test;
@@ -54,11 +55,13 @@ public void testWriteEntityToFile() throws Exception {
FileSystemTimelineWriterImpl fsi = null;
try {
+ TimelineCollectorContext context = new TimelineCollectorContext(
+ "cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
+ "app_id");
fsi = new FileSystemTimelineWriterImpl();
fsi.init(new YarnConfiguration());
fsi.start();
- fsi.write("cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
- "app_id", te);
+ fsi.write(context, te);
String fileName = fsi.getOutputRoot() +
"/entities/cluster_id/user_id/flow_name/flow_version/12345678/app_id/" +
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index 31cb5d2..f2d8f11 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -43,10 +43,9 @@
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
@@ -148,7 +147,9 @@ public void testWriteEntityToHBase() throws Exception {
String flowVersion = "AB7822C10F1111";
long runid = 1002345678919L;
String appName = "some app name";
- hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+ TimelineCollectorContext context = new TimelineCollectorContext(cluster,
+ user, flow, flowVersion, runid, appName);
+ hbi.write(context, te);
hbi.stop();
// scan the table and see that entity exists
@@ -310,7 +311,9 @@ private void testAdditionalEntity() throws IOException {
String flowVersion = "1111F01C2287BA";
long runid = 1009876543218L;
String appName = "some app name";
- hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+ TimelineCollectorContext context = new TimelineCollectorContext(cluster,
+ user, flow, flowVersion, runid, appName);
+ hbi.write(context, entities);
hbi.stop();
// scan the table and see that entity exists
Scan s = new Scan();
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
new file mode 100644
index 0000000..30b013c
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.OfflineAggregationInfo;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+
+public class TestPhoenixOfflineAggregationWriterImpl extends BaseTest {
+ private static PhoenixOfflineAggregationWriterImpl writer;
+ private static final int BATCH_SIZE = 3;
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ writer = setupPhoenixClusterAndWriterForTest(conf);
+ }
+
+ @Test(timeout = 90000)
+ public void testFlowLevelAggregationStorage() throws Exception {
+ testAggregator(OfflineAggregationInfo.FLOW_AGGREGATION);
+ }
+
+ @Test(timeout = 90000)
+ public void testUserLevelAggregationStorage() throws Exception {
+ testAggregator(OfflineAggregationInfo.USER_AGGREGATION);
+ }
+
+ @AfterClass
+ public static void cleanup() throws Exception {
+ writer.dropTable(OfflineAggregationInfo.FLOW_AGGREGATION_TABLE_NAME);
+ writer.dropTable(OfflineAggregationInfo.USER_AGGREGATION_TABLE_NAME);
+ writer.serviceStop();
+ tearDownMiniCluster();
+ }
+
+ private static PhoenixOfflineAggregationWriterImpl setupPhoenixClusterAndWriterForTest(
+ YarnConfiguration conf) throws Exception{
+ Map<String, String> props = new HashMap<>();
+ // Must update config before starting server
+ props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
+ Boolean.FALSE.toString());
+ props.put("java.security.krb5.realm", "");
+ props.put("java.security.krb5.kdc", "");
+ props.put(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER,
+ Boolean.FALSE.toString());
+ props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
+ props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
+ // Make a small batch size to test multiple calls to reserve sequences
+ props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB,
+ Long.toString(BATCH_SIZE));
+ // Must update config before starting server
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+
+ PhoenixOfflineAggregationWriterImpl
+ myWriter = new PhoenixOfflineAggregationWriterImpl();
+ // Change connection settings for test
+ conf.set(
+ PhoenixOfflineAggregationWriterImpl.TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR,
+ getUrl());
+ myWriter.connProperties = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ myWriter.serviceInit(conf);
+ return myWriter;
+ }
+
+ private static TimelineEntity getTestAggregationTimelineEntity() {
+ TimelineEntity entity = new TimelineEntity();
+ String id = "hello1";
+ String type = "testAggregationType";
+ entity.setId(id);
+ entity.setType(type);
+ entity.setCreatedTime(1425016501000L);
+ entity.setModifiedTime(1425016502000L);
+
+ entity.addInfo("info1", new Integer(1));
+ entity.addInfo("info2", "helloworld");
+
+ TimelineMetric metric = new TimelineMetric();
+ metric.setId("HDFS_BYTES_READ");
+ metric.addValue(1425016501100L, 8000);
+ entity.addMetric(metric);
+
+ return entity;
+ }
+
+ private void testAggregator(OfflineAggregationInfo aggregationInfo)
+ throws Exception {
+ // Set up a list of timeline entities and write them back to Phoenix
+ int numEntity = 1;
+ TimelineEntities te = new TimelineEntities();
+ te.addEntity(getTestAggregationTimelineEntity());
+ TimelineCollectorContext context = new TimelineCollectorContext("cluster_1",
+ "user1", "testFlow", null, 0, null);
+ writer.writeAggregatedEntity(context, te,
+ aggregationInfo);
+
+ // Verify if we're storing all entities
+ String[] primaryKeyList = aggregationInfo.getPrimaryKeyList();
+ String sql = "SELECT COUNT(" + primaryKeyList[primaryKeyList.length - 1]
+ +") FROM " + aggregationInfo.getTableName();
+ verifySQLWithCount(sql, numEntity, "Number of entities should be ");
+ // Check info
+ sql = "SELECT COUNT(i.info1) FROM "
+ + aggregationInfo.getTableName() + "(i.info1 VARBINARY) ";
+ verifySQLWithCount(sql, numEntity,
+ "Number of entities with info should be ");
+ sql = "SELECT COUNT(m.HDFS_BYTES_READ) FROM "
+ + aggregationInfo.getTableName() + "(m.HDFS_BYTES_READ VARBINARY) ";
+ verifySQLWithCount(sql, numEntity,
+ "Number of entities with info should be ");
+ }
+
+
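+ // Runs the given SQL and asserts that the single COUNT value it returns
+ // matches targetCount.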
+ private void verifySQLWithCount(String sql, int targetCount, String message)
+ throws Exception {
+ try (
+ Statement stmt =
+ writer.getConnection().createStatement();
+ ResultSet rs = stmt.executeQuery(sql)) {
+ assertNotNull("Fail to execute query " + sql, rs);
+ assertTrue("Result set empty on statement " + sql, rs.next());
+ assertEquals(message + " " + targetCount, targetCount, rs.getInt(1));
+ } catch (SQLException se) {
+ fail("SQL exception on query: " + sql
+ + " With exception message: " + se.getLocalizedMessage());
+ }
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixTimelineWriterImpl.java
deleted file mode 100644
index dece83d..0000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixTimelineWriterImpl.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
-public class TestPhoenixTimelineWriterImpl extends BaseTest {
- private static PhoenixTimelineWriterImpl writer;
- private static final int BATCH_SIZE = 3;
-
- @BeforeClass
- public static void setup() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
- writer = setupPhoenixClusterAndWriterForTest(conf);
- }
-
- @Test(timeout = 90000)
- public void testPhoenixWriterBasic() throws Exception {
- // Set up a list of timeline entities and write them back to Phoenix
- int numEntity = 12;
- TimelineEntities te =
- TestTimelineWriterImpl.getStandardTestTimelineEntities(numEntity);
- writer.write("cluster_1", "user1", "testFlow", "version1", 1l, "app_test_1", te);
- // Verify if we're storing all entities
- String sql = "SELECT COUNT(entity_id) FROM "
- + PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME;
- verifySQLWithCount(sql, numEntity, "Number of entities should be ");
- // Check config (half of all entities)
- sql = "SELECT COUNT(c.config) FROM "
- + PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME + "(c.config VARCHAR) ";
- verifySQLWithCount(sql, (numEntity / 2),
- "Number of entities with config should be ");
- // Check info (half of all entities)
- sql = "SELECT COUNT(i.info1) FROM "
- + PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME + "(i.info1 VARBINARY) ";
- verifySQLWithCount(sql, (numEntity / 2),
- "Number of entities with info should be ");
- // Check config and info (a quarter of all entities)
- sql = "SELECT COUNT(entity_id) FROM "
- + PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME
- + "(c.config VARCHAR, i.info1 VARBINARY) "
- + "WHERE c.config IS NOT NULL AND i.info1 IS NOT NULL";
- verifySQLWithCount(sql, (numEntity / 4),
- "Number of entities with both config and info should be ");
- // Check relatesToEntities and isRelatedToEntities
- sql = "SELECT COUNT(entity_id) FROM "
- + PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME
- + "(rt.testType VARCHAR, ir.testType VARCHAR) "
- + "WHERE rt.testType IS NOT NULL AND ir.testType IS NOT NULL";
- verifySQLWithCount(sql, numEntity - 2,
- "Number of entities with both relatesTo and isRelatedTo should be ");
- // Check event
- sql = "SELECT COUNT(entity_id) FROM "
- + PhoenixTimelineWriterImpl.EVENT_TABLE_NAME;
- verifySQLWithCount(sql, (numEntity / 4), "Number of events should be ");
- // Check metrics
- sql = "SELECT COUNT(entity_id) FROM "
- + PhoenixTimelineWriterImpl.METRIC_TABLE_NAME;
- verifySQLWithCount(sql, (numEntity / 4), "Number of events should be ");
- }
-
- @AfterClass
- public static void cleanup() throws Exception {
- writer.dropTable(PhoenixTimelineWriterImpl.ENTITY_TABLE_NAME);
- writer.dropTable(PhoenixTimelineWriterImpl.EVENT_TABLE_NAME);
- writer.dropTable(PhoenixTimelineWriterImpl.METRIC_TABLE_NAME);
- writer.serviceStop();
- tearDownMiniCluster();
- }
-
- private static PhoenixTimelineWriterImpl setupPhoenixClusterAndWriterForTest(
- YarnConfiguration conf) throws Exception{
- Map props = new HashMap<>();
- // Must update config before starting server
- props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
- Boolean.FALSE.toString());
- props.put("java.security.krb5.realm", "");
- props.put("java.security.krb5.kdc", "");
- props.put(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER,
- Boolean.FALSE.toString());
- props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
- props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
- // Make a small batch size to test multiple calls to reserve sequences
- props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB,
- Long.toString(BATCH_SIZE));
- // Must update config before starting server
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-
- PhoenixTimelineWriterImpl myWriter = new PhoenixTimelineWriterImpl();
- // Change connection settings for test
- conf.set(
- PhoenixTimelineWriterImpl.TIMELINE_SERVICE_PHOENIX_STORAGE_CONN_STR,
- getUrl());
- myWriter.connProperties = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- myWriter.serviceInit(conf);
- return myWriter;
- }
-
- private void verifySQLWithCount(String sql, int targetCount, String message)
- throws Exception {
- try (
- Statement stmt =
- writer.getConnection().createStatement();
- ResultSet rs = stmt.executeQuery(sql)) {
- assertTrue("Result set empty on statement " + sql, rs.next());
- assertNotNull("Fail to execute query " + sql, rs);
- assertEquals(message + " " + targetCount, targetCount, rs.getInt(1));
- } catch (SQLException se) {
- fail("SQL exception on query: " + sql
- + " With exception message: " + se.getLocalizedMessage());
- }
- }
-}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterImpl.java
deleted file mode 100644
index 7a7afc0..0000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterImpl.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-
-public class TestTimelineWriterImpl {
- static TimelineEntities getStandardTestTimelineEntities(int listSize) {
- TimelineEntities te = new TimelineEntities();
- for (int i = 0; i < listSize; i++) {
- TimelineEntity entity = new TimelineEntity();
- String id = "hello" + i;
- String type = "testType";
- entity.setId(id);
- entity.setType(type);
- entity.setCreatedTime(1425016501000L + i);
- entity.setModifiedTime(1425016502000L + i);
- if (i > 0) {
- entity.addRelatesToEntity(type, "hello" + i);
- entity.addRelatesToEntity(type, "hello" + (i - 1));
- }
- if (i < listSize - 1) {
- entity.addIsRelatedToEntity(type, "hello" + i);
- entity.addIsRelatedToEntity(type, "hello" + (i + 1));
- }
- int category = i % 4;
- switch (category) {
- case 0:
- entity.addConfig("config", "config" + i);
- // Fall through deliberately
- case 1:
- entity.addInfo("info1", new Integer(i));
- entity.addInfo("info2", "helloworld");
- // Fall through deliberately
- case 2:
- break;
- case 3:
- entity.addConfig("config", "config" + i);
- TimelineEvent event = new TimelineEvent();
- event.setId("test event");
- event.setTimestamp(1425016501100L + i);
- event.addInfo("test_info", "content for " + entity.getId());
- event.addInfo("test_info1", new Integer(i));
- entity.addEvent(event);
- TimelineMetric metric = new TimelineMetric();
- metric.setId("HDFS_BYTES_READ");
- metric.addValue(1425016501100L + i, 8000 + i);
- entity.addMetric(metric);
- break;
- }
- te.addEntity(entity);
- }
- return te;
- }
-}