diff --git llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
index e5c4a00f3b..76a9734a52 100644
--- llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
+++ llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
@@ -22,11 +22,13 @@
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;

+import java.util.Map;
+
 public interface LlapIo<T> {
   InputFormat<NullWritable, T> getInputFormat(
       InputFormat<?, ?> sourceInputFormat, Deserializer serde);
   void close();
-  String getMemoryInfo();
+  Map<String, Object> getMemoryInfo(String filter);

   /**
    * purge is best effort and will just release the buffers that are unlocked (refCount == 0). This is typically
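Review note: getMemoryInfo() no longer returns preformatted text, so callers now own the serialization. A minimal sketch of a consumer, reusing the Jackson 1.x ObjectMapper that the servlet change below introduces; the MemoryInfoDump class and its main() are illustrative only, not part of this patch:

    import java.util.Map;

    import org.apache.hadoop.hive.llap.io.api.LlapIo;
    import org.apache.hadoop.hive.llap.io.api.LlapProxy;
    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.map.SerializationConfig;

    public class MemoryInfoDump {
      public static void main(String[] args) throws Exception {
        LlapIo<?> llapIo = LlapProxy.getIo(); // null when LLAP IO is not initialized
        if (llapIo == null) {
          return;
        }
        // A null filter collects every component; a simple class name such as
        // "BuddyAllocator" restricts the dump to that component (see LlapIoImpl below).
        Map<String, Object> info = llapIo.getMemoryInfo(null);
        ObjectMapper mapper = new ObjectMapper();
        mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
        System.out.println(mapper.writeValueAsString(info));
      }
    }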
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 60d6edfdcb..57c30bc1c2 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hive.llap.cache;

-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
+import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
+import org.apache.hadoop.hive.ql.io.orc.encoded.StoppableAllocator;

 import java.io.File;
 import java.io.IOException;
@@ -34,19 +39,19 @@
 import java.nio.file.attribute.FileAttribute;
 import java.nio.file.attribute.PosixFilePermission;
 import java.nio.file.attribute.PosixFilePermissions;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;

-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
-import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
-import org.apache.hadoop.hive.ql.io.orc.encoded.StoppableAllocator;
+import static java.lang.String.valueOf;

 public final class BuddyAllocator implements EvictionAwareAllocator, StoppableAllocator, BuddyAllocatorMXBean, LlapIoDebugDump {
@@ -676,28 +681,33 @@ private void logOomErrorMessage(String msg) {
    * are connected in a cycle, so we need to make sure the who-calls-whom order is definite.
    */
   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nDefrag counters: ");
-    for (int i = 0; i < defragCounters.length; ++i) {
-      sb.append(defragCounters[i].get()).append(", ");
-    }
-    sb.append("\nAllocator state:");
+  public void debugDumpShort(Map<String, Object> data) {
+    Map<String, Object> allocator = new HashMap<>();
+    List<String> dc = Arrays.stream(this.defragCounters).map(String::valueOf).collect(Collectors.toList());
+    allocator.put("defrag_counters", dc);
+    List<Map<String, Object>> arenaMapList = new ArrayList<>();
     int unallocCount = 0, fullCount = 0;
     long totalFree = 0;
-    for (Arena arena : arenas) {
-      Integer result = arena.debugDumpShort(sb);
+    for (int i = 0; i < arenas.length; i++) {
+      List<Map<String, String>> freeList = new ArrayList<>();
+      Integer result = arenas[i].debugDumpShort(freeList);
       if (result == null) {
         ++unallocCount;
       } else if (result == 0) {
         ++fullCount;
       } else {
+        Map<String, Object> arenaMap = new HashMap<>();
+        arenaMap.put("name", "arena" + i);
+        arenaMap.put("free", freeList);
+        arenaMapList.add(arenaMap);
         totalFree += result;
       }
     }
-    sb.append("\nTotal available and allocated: ").append(totalFree).append(
-        "; unallocated arenas: ").append(unallocCount).append(
-        "; full arenas ").append(fullCount);
-    sb.append("\n");
+    allocator.put("arenas", arenaMapList);
+    allocator.put("total_available_and_allocated", valueOf(totalFree));
+    allocator.put("unallocated_arenas", valueOf(unallocCount));
+    allocator.put("full_arenas", valueOf(fullCount));
+    data.put(getClass().getSimpleName(), allocator);
   }

   @Override
@@ -1133,7 +1143,7 @@ private void debugDump(StringBuilder result) {
       }
     }

-    public Integer debugDumpShort(StringBuilder result) {
+    public Integer debugDumpShort(List<Map<String, String>> freeArena) {
       if (data == null) {
         return null;
       }
@@ -1150,11 +1160,12 @@ public Integer debugDumpShort(StringBuilder result) {
           nextHeaderIx = getNextFreeListItem(offsetFromHeaderIndex(nextHeaderIx));
         }
         if (count > 0) {
-          if (total == 0) {
-            result.append("\nArena with free list lengths by size: ");
-          }
           total += (allocSize * count);
-          result.append(allocSize).append(" => ").append(count).append(", ");
+
+          Map<String, String> free = new HashMap<>(2);
+          free.put("size", valueOf(allocSize));
+          free.put("count", valueOf(count));
+          freeArena.add(free);
         }
       } finally {
         freeList.lock.unlock();
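Review note: with the servlet change below, the allocator entry built above serializes roughly as follows. The key names come from the code; all values are invented for illustration. Note that every leaf is a String, since the values all pass through String.valueOf:

    {
      "BuddyAllocator" : {
        "defrag_counters" : [ "0", "0", "0" ],
        "arenas" : [ {
          "name" : "arena0",
          "free" : [ { "size" : "262144", "count" : "3" } ]
        } ],
        "total_available_and_allocated" : "786432",
        "unallocated_arenas" : "7",
        "full_arenas" : "0"
      }
    }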
tagState.put("max_count", String.valueOf(state.maxCount)); + tagState.put("total_size", String.valueOf(state.totalSize)); + tagState.put("max_size", String.valueOf(state.maxSize)); + states.add(tagState); } } + data.put(getClass().getSimpleName(), states); } @Override diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapIoDebugDump.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapIoDebugDump.java index 105b354d07..ee4aeaca36 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapIoDebugDump.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapIoDebugDump.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.llap.cache; +import java.util.Map; + public interface LlapIoDebugDump { - void debugDumpShort(StringBuilder sb); + void debugDumpShort(Map data); } diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java index 62d7e55344..4e4d144e90 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java @@ -21,6 +21,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -41,7 +42,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; -import com.google.common.base.Joiner; + +import static java.lang.String.valueOf; public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, LlapIoDebugDump { private static final int DEFAULT_CLEANUP_INTERVAL = 600; @@ -450,8 +452,9 @@ public Allocator getAllocator() { } @Override - public void debugDumpShort(StringBuilder sb) { - sb.append("\nORC cache state "); + public void debugDumpShort(Map data) { + List entries = new ArrayList<>(); + int allLocked = 0, allUnlocked = 0, allEvicted = 0, allMoving = 0; long totalUsedSpace = 0; for (Map.Entry>> e : @@ -495,19 +498,15 @@ public void debugDumpShort(StringBuilder sb) { allMoving += fileMoving; totalUsedSpace += fileMemoryUsage; - sb.append("\n file " - + e.getKey() - + ": " - + fileLocked - + " locked, " - + fileUnlocked - + " unlocked, " - + fileEvicted - + " evicted, " - + fileMoving - + " being moved," - + fileMemoryUsage - + " total used byte"); + Map cacheEntry = new HashMap<>(); + cacheEntry.put("file", valueOf(e.getKey())); + cacheEntry.put("locked", valueOf(fileLocked)); + cacheEntry.put("unlocked", valueOf(fileUnlocked)); + cacheEntry.put("evicted", valueOf(fileEvicted)); + cacheEntry.put("being_moved", valueOf(fileMoving)); + cacheEntry.put("total_used_byte", valueOf(fileMemoryUsage)); + entries.add(cacheEntry); + if (fileLocked > 0 && LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) { LlapIoImpl.LOCKING_LOGGER.trace("locked-buffers: {}", lockedBufs); } @@ -515,16 +514,14 @@ public void debugDumpShort(StringBuilder sb) { e.getValue().decRef(); } } - sb.append("\nORC cache summary: " - + allLocked - + " locked, " - + allUnlocked - + " unlocked, " - + allEvicted - + " evicted, " - + allMoving - + " being moved," - + totalUsedSpace - + "total used space"); + Map cacheSummary = new HashMap(); + cacheSummary.put("entries", entries); + cacheSummary.put("locked", valueOf(allLocked)); + cacheSummary.put("unlocked", valueOf(allUnlocked)); + cacheSummary.put("evicted", valueOf(allEvicted)); + cacheSummary.put("moved", valueOf(allMoving)); + cacheSummary.put("total_used_space", 
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
index 62d7e55344..4e4d144e90 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
@@ -21,6 +21,7 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -41,7 +42,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
-import com.google.common.base.Joiner;
+
+import static java.lang.String.valueOf;

 public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, LlapIoDebugDump {
   private static final int DEFAULT_CLEANUP_INTERVAL = 600;
@@ -450,8 +452,9 @@ public Allocator getAllocator() {
   }

   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nORC cache state ");
+  public void debugDumpShort(Map<String, Object> data) {
+    List<Map<String, String>> entries = new ArrayList<>();
+
     int allLocked = 0, allUnlocked = 0, allEvicted = 0, allMoving = 0;
     long totalUsedSpace = 0;
     for (Map.Entry<Object, FileCache<ConcurrentSkipListMap<Long, LlapDataBuffer>>> e :
@@ -495,19 +498,15 @@ public void debugDumpShort(StringBuilder sb) {
        allMoving += fileMoving;
        totalUsedSpace += fileMemoryUsage;

-       sb.append("\n  file "
-           + e.getKey()
-           + ": "
-           + fileLocked
-           + " locked, "
-           + fileUnlocked
-           + " unlocked, "
-           + fileEvicted
-           + " evicted, "
-           + fileMoving
-           + " being moved,"
-           + fileMemoryUsage
-           + " total used byte");
+       Map<String, String> cacheEntry = new HashMap<>();
+       cacheEntry.put("file", valueOf(e.getKey()));
+       cacheEntry.put("locked", valueOf(fileLocked));
+       cacheEntry.put("unlocked", valueOf(fileUnlocked));
+       cacheEntry.put("evicted", valueOf(fileEvicted));
+       cacheEntry.put("being_moved", valueOf(fileMoving));
+       cacheEntry.put("total_used_byte", valueOf(fileMemoryUsage));
+       entries.add(cacheEntry);
+
        if (fileLocked > 0 && LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) {
          LlapIoImpl.LOCKING_LOGGER.trace("locked-buffers: {}", lockedBufs);
        }
@@ -515,16 +514,14 @@
        e.getValue().decRef();
      }
    }
-   sb.append("\nORC cache summary: "
-       + allLocked
-       + " locked, "
-       + allUnlocked
-       + " unlocked, "
-       + allEvicted
-       + " evicted, "
-       + allMoving
-       + " being moved,"
-       + totalUsedSpace
-       + "total used space");
+   Map<String, Object> cacheSummary = new HashMap<>();
+   cacheSummary.put("entries", entries);
+   cacheSummary.put("locked", valueOf(allLocked));
+   cacheSummary.put("unlocked", valueOf(allUnlocked));
+   cacheSummary.put("evicted", valueOf(allEvicted));
+   cacheSummary.put("moved", valueOf(allMoving));
+   cacheSummary.put("total_used_space", valueOf(totalUsedSpace));
+
+   data.put(getClass().getSimpleName(), cacheSummary);
  }
 }
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
index bdc6721f64..7966e597a1 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hive.llap.cache;

+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Map;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

@@ -28,6 +30,8 @@
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;

+import static java.lang.String.valueOf;
+
 public class LowLevelFifoCachePolicy implements LowLevelCachePolicy {
   private final Lock lock = new ReentrantLock();
   private final LinkedList<LlapCacheableBuffer> buffers;
@@ -99,13 +103,15 @@ private long evictInternal(long memoryToReserve, int minSize) {
   }

   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nFIFO eviction list: ");
+  public void debugDumpShort(Map<String, Object> data) {
+
+    Map<String, String> cachePolicy = new HashMap<>();
     lock.lock();
     try {
-      sb.append(buffers.size()).append(" elements)");
+      cachePolicy.put("buffer_size", valueOf(buffers.size()));
     } finally {
       lock.unlock();
     }
+    data.put(getClass().getSimpleName(), cachePolicy);
   }
 }
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index 759819da40..55bac72cf0 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.llap.cache;

+import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;

@@ -37,6 +39,8 @@
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.impl.MsInfo;

+import static java.lang.String.valueOf;
+
 /**
  * Implementation of the algorithm from "On the Existence of a Spectrum of Policies
  * that Subsumes the Least Recently Used (LRU) and Least Frequently Used (LFU) Policies".
@@ -597,24 +601,22 @@ private static void dumpList(StringBuilder result,
   }

   @Override
-  public void debugDumpShort(StringBuilder sb) {
+  public void debugDumpShort(Map<String, Object> data) {
+    Map<String, String> cachePolicy = new HashMap<>();
+
     long[] metricData = metrics.getUsageStats();
-    sb.append("\nLRFU eviction list: ")
-        .append(metricData[PolicyMetrics.LISTSIZE]).append(" items");
-    sb.append("\nLRFU eviction heap: ")
-        .append(heapSize).append(" items (of max ").append(maxHeapSize).append(")");
-    sb.append("\nLRFU data on heap: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.DATAONHEAP]));
-    sb.append("\nLRFU metadata on heap: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.METAONHEAP]));
-    sb.append("\nLRFU data on eviction list: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.DATAONLIST]));
-    sb.append("\nLRFU metadata on eviction list: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.METAONLIST]));
-    sb.append("\nLRFU data locked: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.LOCKEDDATA]));
-    sb.append("\nLRFU metadata locked: ")
-        .append(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.LOCKEDMETA]));
+    cachePolicy.put("eviction_list", valueOf(metricData[PolicyMetrics.LISTSIZE]));
+    cachePolicy.put("eviction_heap_size", valueOf(heapSize));
+    cachePolicy.put("eviction_heap_size_max", valueOf(maxHeapSize));
+    cachePolicy.put("data_on_heap", valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.DATAONHEAP])));
+    cachePolicy.put("meta_on_heap", valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.METAONHEAP])));
+    cachePolicy.put("data_on_eviction_list",
+        valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.DATAONLIST])));
+    cachePolicy.put("meta_on_eviction_list",
+        valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.METAONLIST])));
+    cachePolicy.put("data_locked", valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.LOCKEDDATA])));
+    cachePolicy.put("meta_locked", valueOf(LlapUtil.humanReadableByteCount(metricData[PolicyMetrics.LOCKEDMETA])));
+    data.put(getClass().getSimpleName(), cachePolicy);
   }

   /**
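Review note: the byte-sized entries here (data_on_heap, meta_on_heap, data_on_eviction_list, and friends) pass through LlapUtil.humanReadableByteCount, so they serialize as display strings such as "1.5MB" rather than raw byte counts; a consumer that wants to aggregate them numerically should read the underlying policy metrics instead.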
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
index 2a39d2d328..4256c2d411 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
@@ -25,6 +25,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -50,6 +51,8 @@
 import org.apache.orc.OrcProto;
 import org.apache.orc.OrcProto.ColumnEncoding;

+import static java.lang.String.valueOf;
+
 public class SerDeLowLevelCacheImpl implements BufferUsageManager, LlapIoDebugDump, Configurable {
   private static final int DEFAULT_CLEANUP_INTERVAL = 600;
   private Configuration conf;
@@ -761,15 +764,17 @@ public Configuration getConf() {
   }

   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nSerDe cache state ");
+  public void debugDumpShort(Map<String, Object> data) {
+    List<Map<String, String>> cacheEntries = new ArrayList<>();
     int allLocked = 0, allUnlocked = 0, allEvicted = 0, allMoving = 0;
     for (Map.Entry<Object, FileCache<FileData>> e : cache.entrySet()) {
       if (!e.getValue().incRef()) continue;
       try {
+        Map<String, String> entry = new HashMap<>();
         FileData fd = e.getValue().getCache();
         int fileLocked = 0, fileUnlocked = 0, fileEvicted = 0, fileMoving = 0;
-        sb.append(fd.colCount).append(" columns, ").append(fd.stripes.size()).append(" stripes; ");
+        entry.put("columns", valueOf(fd.colCount));
+        entry.put("stripes", valueOf(fd.stripes.size()));
         for (StripeData stripe : fd.stripes) {
           if (stripe.data == null) continue;
           for (int i = 0; i < stripe.data.length; ++i) {
@@ -805,13 +810,22 @@ public void debugDumpShort(StringBuilder sb) {
         allUnlocked += fileUnlocked;
         allEvicted += fileEvicted;
         allMoving += fileMoving;
-        sb.append("\n  file " + e.getKey() + ": " + fileLocked + " locked, " + fileUnlocked
-            + " unlocked, " + fileEvicted + " evicted, " + fileMoving + " being moved");
+        entry.put("file", valueOf(e.getKey()));
+        entry.put("locked", valueOf(fileLocked));
+        entry.put("unlocked", valueOf(fileUnlocked));
+        entry.put("evicted", valueOf(fileEvicted));
+        entry.put("being_moved", valueOf(fileMoving));
+        cacheEntries.add(entry);
       } finally {
         e.getValue().decRef();
       }
     }
-    sb.append("\nSerDe cache summary: " + allLocked + " locked, " + allUnlocked + " unlocked, "
-        + allEvicted + " evicted, " + allMoving + " being moved");
+    Map<String, Object> summary = new HashMap<>();
+    summary.put("entries", cacheEntries);
+    summary.put("locked", valueOf(allLocked));
+    summary.put("unlocked", valueOf(allUnlocked));
+    summary.put("evicted", valueOf(allEvicted));
+    summary.put("being_moved", valueOf(allMoving));
+    data.put(getClass().getSimpleName(), summary);
   }
 }
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
index 835b46adec..c9cda0d986 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
@@ -27,6 +27,8 @@
 import org.apache.hadoop.hive.llap.io.api.LlapIo;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hive.http.HttpServer;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -72,7 +74,10 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) {
       if (llapIo == null) {
         writer.write("LLAP IO not found");
       } else {
-        writer.write(llapIo.getMemoryInfo());
+        String filter = request.getParameter("f");
+        ObjectMapper mapper = new ObjectMapper();
+        mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
+        mapper.writeValue(writer, llapIo.getMemoryInfo(filter));
       }

     } finally {
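Review note: assuming this servlet keeps its existing registration on the LLAP daemon web UI (it is served from the daemon HTTP port, 15002 by default; the exact mount path, e.g. /iomem, is worth double-checking in LlapWebServices), the new parameter can be exercised with a plain GET such as http://daemon-host:15002/iomem?f=BuddyAllocator, which returns only the allocator section of the JSON; omitting f dumps every component.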
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index c63ee5f79b..7d855389ad 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -18,26 +18,15 @@
 package org.apache.hadoop.hive.llap.io.api.impl;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-
-import javax.management.ObjectName;
-
-import org.apache.hadoop.hive.llap.daemon.impl.StatsRecordingThreadPool;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.io.Allocator;
+import org.apache.hadoop.hive.common.io.Allocator.BufferObjectFactory;
 import org.apache.hadoop.hive.common.io.DataCache;
 import org.apache.hadoop.hive.common.io.DiskRange;
 import org.apache.hadoop.hive.common.io.DiskRangeList;
 import org.apache.hadoop.hive.common.io.FileMetadataCache;
-import org.apache.hadoop.hive.common.io.Allocator.BufferObjectFactory;
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -48,6 +37,7 @@
 import org.apache.hadoop.hive.llap.cache.LlapDataBuffer;
 import org.apache.hadoop.hive.llap.cache.LlapIoDebugDump;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache;
+import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheImpl;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheMemoryManager;
 import org.apache.hadoop.hive.llap.cache.LowLevelCachePolicy;
@@ -56,7 +46,7 @@
 import org.apache.hadoop.hive.llap.cache.SerDeLowLevelCacheImpl;
 import org.apache.hadoop.hive.llap.cache.SimpleAllocator;
 import org.apache.hadoop.hive.llap.cache.SimpleBufferManager;
-import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
+import org.apache.hadoop.hive.llap.daemon.impl.StatsRecordingThreadPool;
 import org.apache.hadoop.hive.llap.io.api.LlapIo;
 import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer;
 import org.apache.hadoop.hive.llap.io.decode.GenericColumnVectorProducer;
@@ -67,24 +57,28 @@
 import org.apache.hadoop.hive.llap.metrics.MetricsUtils;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.io.LlapCacheOnlyInputFormatInterface;
-import org.apache.hadoop.hive.ql.io.orc.encoded.IoTrace;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.encoded.IoTrace;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hive.common.util.FixedSizedObjectPool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;

-
-
-
-
-
-import com.google.common.primitives.Ints;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-public class LlapIoImpl implements LlapIo<VectorizedRowBatch>, LlapIoDebugDump {
+public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
   public static final Logger LOG = LoggerFactory.getLogger("LlapIoImpl");
   public static final Logger ORC_LOGGER = LoggerFactory.getLogger("LlapIoOrc");
   public static final Logger CACHE_LOGGER = LoggerFactory.getLogger("LlapIoCache");
@@ -203,8 +197,8 @@ private LlapIoImpl(Configuration conf) throws IOException {
       this.memoryManager = null;
       debugDumpComponents.add(new LlapIoDebugDump() {
         @Override
-        public void debugDumpShort(StringBuilder sb) {
-          sb.append("LLAP IO allocator is not in use!");
+        public void debugDumpShort(Map<String, Object> data) {
+          data.put("", "LLAP IO allocator is not in use!");
         }
       });
     }
@@ -229,10 +223,10 @@ private void registerMXBeans() {
   }

   @Override
-  public String getMemoryInfo() {
-    StringBuilder sb = new StringBuilder();
-    debugDumpShort(sb);
-    return sb.toString();
+  public Map<String, Object> getMemoryInfo(String filter) {
+    Map<String, Object> data = new HashMap<>();
+    debugDumpShort(data, filter);
+    return data;
   }

   @Override
@@ -331,10 +325,11 @@ public MemoryBuffer create() {
     }
   }

-  @Override
-  public void debugDumpShort(StringBuilder sb) {
+  public void debugDumpShort(Map<String, Object> data, String filter) {
     for (LlapIoDebugDump child : debugDumpComponents) {
-      child.debugDumpShort(sb);
+      if (filter == null || child.getClass().getSimpleName().equals(filter)) {
+        child.debugDumpShort(data);
+      }
     }
   }
 }
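Review note: a small sketch of the dispatch semantics above, assuming an already-initialized LlapIoImpl (construction elided; FilterDemo is illustrative only). One behavior worth flagging: an unrecognized filter value simply yields an empty map, which the servlet serializes as {} rather than reporting an error.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;

    public class FilterDemo {
      static void demo(LlapIoImpl llapIo) {
        Map<String, Object> all = new HashMap<>();
        llapIo.debugDumpShort(all, null);             // every registered component reports
        Map<String, Object> one = new HashMap<>();
        llapIo.debugDumpShort(one, "BuddyAllocator"); // at most the matching component
        Map<String, Object> none = new HashMap<>();
        llapIo.debugDumpShort(none, "NoSuchThing");   // stays empty -> serialized as {}
      }
    }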
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java
index 8400fe9841..83ff18b71c 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/MetadataCache.java
@@ -19,33 +19,32 @@
 package org.apache.hadoop.hive.llap.io.metadata;

 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.io.FileMetadataCache;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.hive.common.io.encoded.MemoryBufferOrBuffers;
-
-import java.nio.ByteBuffer;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import org.apache.hadoop.hive.common.io.DataCache.BooleanRef;
 import org.apache.hadoop.hive.common.io.DiskRange;
 import org.apache.hadoop.hive.common.io.DiskRangeList;
-import org.apache.hadoop.hive.common.io.DataCache.BooleanRef;
+import org.apache.hadoop.hive.common.io.FileMetadataCache;
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
+import org.apache.hadoop.hive.common.io.encoded.MemoryBufferOrBuffers;
 import org.apache.hadoop.hive.llap.cache.BuddyAllocator;
-import org.apache.hadoop.hive.llap.cache.EvictionAwareAllocator;
 import org.apache.hadoop.hive.llap.cache.EvictionDispatcher;
 import org.apache.hadoop.hive.llap.cache.LlapAllocatorBuffer;
 import org.apache.hadoop.hive.llap.cache.LlapIoDebugDump;
+import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.cache.LowLevelCachePolicy;
 import org.apache.hadoop.hive.llap.cache.MemoryManager;
-import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
-import org.apache.hadoop.hive.ql.io.orc.encoded.StoppableAllocator;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static java.lang.String.valueOf;

 public class MetadataCache implements LlapIoDebugDump, FileMetadataCache {
   private final ConcurrentHashMap<Object, LlapBufferOrBuffers> metadata =
@@ -126,23 +125,23 @@ public void notifyEvicted(OrcFileEstimateErrors buffer) {

   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nMetadata cache state: ")
-        .append(metadata.size())
-        .append(" files and stripes, ")
-        .append(metadata.values().parallelStream().mapToLong(value -> {
-          if (value.getSingleLlapBuffer() != null) {
-            return value.getSingleLlapBuffer().allocSize;
-          }
-          long sum = 0;
-          for (LlapAllocatorBuffer llapMetadataBuffer : value.getMultipleLlapBuffers()) {
-            sum += llapMetadataBuffer.allocSize;
-          }
-          return sum;
-        }).sum())
-        .append(" total used bytes, ")
-        .append(estimateErrors.size())
-        .append(" files w/ORC estimate");
+  public void debugDumpShort(Map<String, Object> data) {
+    long totalUsedBytes = metadata.values().parallelStream().mapToLong(value -> {
+      if (value.getSingleLlapBuffer() != null) {
+        return value.getSingleLlapBuffer().allocSize;
+      }
+      long sum = 0;
+      for (LlapAllocatorBuffer llapMetadataBuffer : value.getMultipleLlapBuffers()) {
+        sum += llapMetadataBuffer.allocSize;
+      }
+      return sum;
+    }).sum();
+
+    Map<String, String> state = new HashMap<>();
+    state.put("files_and_stripes", valueOf(metadata.size()));
+    state.put("total_used_bytes", valueOf(totalUsedBytes));
+    state.put("files_w_ORC_estimate", valueOf(estimateErrors.size()));
+    data.put(getClass().getSimpleName(), state);
   }

   @Override
diff --git llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
index 1c2eef2d5f..de310fe386 100644
--- llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
+++ llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
@@ -23,6 +23,7 @@
 import static org.junit.Assert.assertTrue;

 import java.util.Arrays;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
@@ -121,7 +122,7 @@ public long purge() {
     }

     @Override
-    public void debugDumpShort(StringBuilder sb) {
+    public void debugDumpShort(Map<String, Object> data) {
     }
   }

diff --git llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
index ad6b90e358..b9dabc0a92 100644
--- llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
+++ llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.*;

 import java.nio.ByteBuffer;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;

@@ -74,7 +75,7 @@ public void verifyEquals(int i) {
     }

     @Override
-    public void debugDumpShort(StringBuilder sb) {
+    public void debugDumpShort(Map<String, Object> data) {
     }
   }

diff --git ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormat.java ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormat.java
index 5452c38863..8fa5405aad 100644
--- ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormat.java
@@ -19,22 +19,14 @@
 import java.io.IOException;

 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;

 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.ql.io.StreamingOutputFormat;

-import com.google.common.base.Preconditions;
-
 public class LlapOutputFormat<K extends Writable, V extends Writable> implements OutputFormat<K, V>, StreamingOutputFormat {