MOST IMPORTANT ISSUES

  Overhead            Problem
      Section 2. Where Memory Goes, by Class
  544,879K (64.5%)    High memory usage by byte[]
  120,817K (14.3%)    High memory usage by String
      Section 3. Where Memory Goes, by GC Root (Check for Memory Leaks)
  251,868K (29.8%)    High memory amount retained by Object tree for GC root(s) Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
  236,338K (28.0%)    High memory amount retained by Object tree for GC root(s) Java Local(org.apache.parquet.hadoop.ParquetFileReader) [@e27511e0,@e2ba8838,@e2c05ca0,@e2c05e68] ... and 26 more GC roots (30 thread(s))
  183,025K (21.7%)    High memory amount retained by Object tree for GC root(s) Java Local(java.nio.HeapByteBuffer) [@e2c258a0,@e6da5218,@e6dd49f8,@e6ddbef8] ... and 23 more GC roots (27 thread(s))
      Section 7. Duplicate Strings: overhead 13.6%
  115,322K (13.6%)    High overhead of all duplicate strings
      Section 8. Bad Collections: overhead 3.3%
  28,124K (3.3%)      High overhead of all bad collections
      Section 10. Bad Primitive Arrays: overhead 28.9%
  244,002K (28.9%)    High overhead of all bad primitive arrays
  165,046K (19.5%)    High overhead of type trail-0s byte[]
  78,470K (9.3%)      High overhead of type empty byte[]
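
Two of the flagged overheads are generic and usually fixable at the point where the objects are created: the duplicate strings (largely the repeated partition-path and URI values shown in Section 2) and the trail-0s byte[] arrays, i.e. buffers allocated larger than the data they ever hold. A minimal sketch of both remediations, using only JDK API (class and method names are illustrative, not taken from this codebase):

    import java.util.Arrays;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class HeapOverheadFixes {

        // Canonicalizing interner: repeated values, such as the partition-path
        // strings that dominate Section 7, collapse to a single instance.
        private static final Map<String, String> POOL = new ConcurrentHashMap<>();

        static String dedup(String s) {
            String prev = POOL.putIfAbsent(s, s);
            return prev != null ? prev : s;
        }

        // "Trail-0s" byte[] arrays are buffers allocated larger than the data
        // they hold; copying down to the used length frees the zero-filled tail.
        static byte[] trim(byte[] buf, int usedLength) {
            return usedLength < buf.length ? Arrays.copyOf(buf, usedLength) : buf;
        }
    }

Since this dump comes from JVM 1.8.0_141, running G1 with -XX:+UseStringDeduplication is an alternative that deduplicates the underlying char[] arrays at GC time without code changes.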




1. Top-Level Stats
Generated by JXRay version 2.1 update 1

Heap dump java_pid173815.hprof created on Tue Jul 03 14:44:57 PDT 2018
JVM version: 1.8.0_141

           Instances           Object arrays     Primitive arrays    Total
  Objects  4,907,989           308,461           844,406             6,060,856
  Bytes    157,778K (18.7%)    40,410K (4.8%)    646,720K (76.5%)    844,909K (100.0%)


           Live                Garbage          Total
  Objects  6,040,101           20,755           6,060,856
  Bytes    835,881K (98.9%)    9,027K (1.1%)    844,909K (100.0%)


  Number of classes    Number of threads
  9,099                95


  JVM pointer size    Object header size
  4                   12
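
A pointer size of 4 on a 64-bit 1.8 JVM indicates compressed oops, and together with the 12-byte object header this fixes the shallow size of every object in the report. A sketch of the arithmetic for the byte[] instances that dominate this dump (standard HotSpot layout; nothing here is read from the dump itself):

    public class ShallowSize {
        // Shallow size of a byte[] under this dump's layout: 12-byte object
        // header plus a 4-byte array-length field, one byte per element, and
        // the total rounded up to 8-byte alignment.
        static long byteArrayShallowSize(int length) {
            long unaligned = 12 + 4 + (long) length;
            return (unaligned + 7) & ~7L;
        }

        public static void main(String[] args) {
            // One of the sampled Parquet dictionary buffers from Section 2:
            System.out.println(byteArrayShallowSize(9_495_226)); // 9495248 bytes
        }
    }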




2. Where Memory Goes, by Class

  # instances    Shallow size        Impl-inclusive size    Class name
  2,618          544,879K (64.5%)    544,879K (64.5%)       byte[]
Reference chains
Expensive data fields

435,908K (51.6%): byte[]: 103 / 100% objects

 Random sample 
      byte[9495226]{21, 4, 21, -128, -120, 39, 21, -50, -68, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 18, -86, 2, 0, 1, 1, 8, ...}
      byte[9475773]{21, 4, 21, -128, -120, 39, 21, -100, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 3, 109, 1, 0, 1, 1, 8, ...}
      byte[14366023]{21, 4, 21, -128, -120, 39, 21, -54, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -31, 22, 3, 0, 1, 1, 8, ...}
      byte[4210480]{21, 4, 21, -128, -120, 39, 21, -56, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 10, 114, 2, 0, 1, 1, 8, ...}
      byte[14343952]{21, 4, 21, -128, -120, 39, 21, -22, -69, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -78, -63, 3, 0, 1, 1, 8, ...}
      byte[4217201]{21, 4, 21, -128, -120, 39, 21, -120, -71, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 47, -84, 3, 0, 1, 1, 8, ...}
      byte[9497544]{21, 4, 21, -128, -120, 39, 21, -44, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 96, 19, 3, 0, 1, 1, 8, ...}
      byte[14370092]{21, 4, 21, -128, -120, 39, 21, -112, -65, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -42, 69, 1, 0, 1, 1, 4, ...}
      byte[4191096]{21, 4, 21, -128, -120, 39, 21, -66, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 65, -115, 3, 0, 1, 1, 4, ...}

 ↖java.nio.HeapByteBuffer.hb
81,920K (9.7%): byte[]: 5 / 100% objects

 Random sample 
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, -36, 3, 102, 0, -76, 23, 98, -82, -117, -126, 0, 0, 0, -57, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 1, -61, -92, -115, 33, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

 ↖io.netty.buffer.PoolChunk.memory
16,897K (2.0%): byte[]: 37 / 100% objects

 Random sample 
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

 ↖org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer
1,536K (0.2%): byte[]: 48 / 100% objects

 Random sample 
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

 ↖sun.nio.ch.EPollArrayWrapper.eventsLow
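
Most of these bytes are reachable through java.nio.HeapByteBuffer.hb, which is simply the backing array of a heap ByteBuffer: every ByteBuffer.allocate(n) creates a byte[n] that stays pinned for as long as the buffer itself is reachable. A minimal illustration (the size mirrors the ~9.5MB Parquet dictionary buffers sampled above):

    import java.nio.ByteBuffer;

    public class HeapBufferDemo {
        public static void main(String[] args) {
            // ByteBuffer.allocate(n) builds a HeapByteBuffer whose backing
            // byte[n] is what the ".hb" entries in this section attribute
            // memory to.
            ByteBuffer page = ByteBuffer.allocate(9_495_226);
            byte[] backing = page.array(); // the same array reported under .hb
            System.out.println(backing.length); // 9495226
        }
    }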


Full reference chains

235,392K (27.9%): byte[]: 29 / 100% objects

 Random sample 
      byte[9495226]{21, 4, 21, -128, -120, 39, 21, -50, -68, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 18, -86, 2, 0, 1, 1, 8, ...}
      byte[14357419]{21, 4, 21, -128, -120, 39, 21, -108, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 8, 32, -44, 0, 5, 1, 8, -50, ...}
      byte[9475773]{21, 4, 21, -128, -120, 39, 21, -100, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 3, 109, 1, 0, 1, 1, 8, ...}
      byte[4194217]{21, 4, 21, -128, -120, 39, 21, -78, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 37, 71, 4, 0, 1, 1, 8, ...}
      byte[14366023]{21, 4, 21, -128, -120, 39, 21, -54, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -31, 22, 3, 0, 1, 1, 8, ...}
      byte[9504532]{21, 4, 21, -128, -120, 39, 21, -104, -68, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -21, 36, 2, 0, 1, 1, 4, ...}
      byte[9492965]{21, 4, 21, -128, -120, 39, 21, -52, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -84, 10, 3, 0, 1, 1, 4, ...}
      byte[4208320]{21, 4, 21, -128, -120, 39, 21, -76, -63, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 100, -85, 3, 0, 1, 1, 8, ...}
      byte[9479465]{21, 4, 21, -128, -120, 39, 21, -18, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -21, 59, 1, 0, 1, 1, 8, ...}
      byte[4210480]{21, 4, 21, -128, -120, 39, 21, -56, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 10, 114, 2, 0, 1, 1, 8, ...}
      byte[4202195]{21, 4, 21, -128, -120, 39, 21, -82, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -109, 55, 4, 0, 1, 1, 4, ...}
      byte[14343952]{21, 4, 21, -128, -120, 39, 21, -22, -69, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -78, -63, 3, 0, 1, 1, 8, ...}
      byte[4217201]{21, 4, 21, -128, -120, 39, 21, -120, -71, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 47, -84, 3, 0, 1, 1, 8, ...}
      byte[9497544]{21, 4, 21, -128, -120, 39, 21, -44, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 96, 19, 3, 0, 1, 1, 8, ...}
      byte[4182422]{21, 4, 21, -128, -120, 39, 21, -90, -72, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 111, 24, 3, 0, 1, 1, 4, ...}
      byte[9463287]{21, 4, 21, -128, -120, 39, 21, -64, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 77, 36, 1, 0, 1, 1, 8, ...}
      byte[14370092]{21, 4, 21, -128, -120, 39, 21, -112, -65, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -42, 69, 1, 0, 1, 1, 4, ...}
      byte[14354056]{21, 4, 21, -128, -120, 39, 21, -86, -72, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, -43, 96, 4, 0, 1, 1, 8, ...}
      byte[4191096]{21, 4, 21, -128, -120, 39, 21, -66, -66, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 12, 65, -115, 3, 0, 1, 1, 4, ...}
      byte[4228405]{21, 4, 21, -128, -120, 39, 21, -2, -67, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 8, 4, 102, 0, 5, 1, 8, 81, ...}

java.nio.HeapByteBuffer.hb org.apache.parquet.bytes.BytesInput$ByteBufferBytesInput.byteBuf
org.apache.parquet.column.page.DictionaryPage.bytes
org.apache.parquet.hadoop.ColumnChunkPageReadStore$ColumnChunkPageReader.compressedDictionaryPage
{j.u.HashMap}.values
org.apache.parquet.hadoop.ColumnChunkPageReadStore.readers
org.apache.parquet.hadoop.ParquetFileReader.currentRowGroup
↖Java Local(org.apache.parquet.hadoop.ParquetFileReader) [@e27511e0,@e2ba8838,@e2c05ca0,@e2c05e68] ... and 26 more GC roots (30 thread(s))

183,023K (21.7%): byte[]: 27 / 100% objects

 Random sample 
      byte[7906230]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[3529619]{21, 4, 21, -128, -60, 19, 21, -70, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, -24, -29, 0, ...}
      byte[3508221]{21, 4, 21, -128, -60, 19, 21, -80, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, -96, 4, 0, ...}
      byte[11965825]{21, 4, 21, -128, -60, 19, 21, -80, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -6, 50, 0, 4, -48, 76, 0, ...}
      byte[7907662]{21, 4, 21, -128, -60, 19, 21, -74, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 43, 0, 0, ...}
      byte[3498414]{21, 4, 21, -128, -60, 19, 21, -80, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -26, 115, 0, 11, -61, -102, 0, ...}
      byte[7916981]{21, 4, 21, -128, -60, 19, 21, -90, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 13, 16, 0, ...}
      byte[3510525]{21, 4, 21, -128, -60, 19, 21, -90, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 9, -128, 72, 0, ...}
      byte[7903728]{21, 4, 21, -128, -60, 19, 21, -92, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, -65, 22, 0, ...}
      byte[3494787]{21, 4, 21, -128, -60, 19, 21, -92, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 20, -54, 0, ...}
      byte[11943069]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[3504591]{21, 4, 21, -128, -60, 19, 21, -84, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, 110, 55, 0, 0, 21, -56, 0, ...}
      byte[7913684]{21, 4, 21, -128, -60, 19, 21, -82, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, 96, 0, 4, -48, -102, 0, 0, -84, ...}
      byte[3497415]{21, 4, 21, -128, -60, 19, 21, -92, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 12, 63, 0, ...}
      byte[7921621]{21, 4, 21, -128, -60, 19, 21, -88, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -106, 71, 0, 3, -38, 56, 0, ...}
      byte[3510894]{21, 4, 21, -128, -60, 19, 21, -88, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 55, -38, 0, ...}
      byte[11957958]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[7905949]{21, 4, 21, -128, -60, 19, 21, -88, -60, 19, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -30, 9, -12, -1, 127, 0, 0, 121, -63, 0, ...}
      byte[11961169]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[11959631]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

java.nio.HeapByteBuffer.hb ↖Java Local(java.nio.HeapByteBuffer) [@e2c258a0,@e6da5218,@e6dd49f8,@e6ddbef8] ... and 23 more GC roots (27 thread(s))

65,536K (7.8%): byte[]: 4 / 100% objects

 Random sample 
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, -36, 3, 102, 0, -76, 23, 98, -82, -117, -126, 0, 0, 0, -57, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[16777216]{0, 0, 0, 0, 0, 0, 1, -30, 3, 83, -80, -17, 5, 113, -87, -83, 8, 0, 0, 1, -51, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 1, -61, -92, -115, 33, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 1, -20, -41, 22, 0, -120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

io.netty.buffer.PoolChunk.memory io.netty.buffer.PoolChunkList.head
io.netty.buffer.PoolArena$HeapArena.qInit
io.netty.buffer.PoolThreadCache.heapArena
Object[]
io.netty.util.internal.InternalThreadLocalMap.indexedVariables
io.netty.util.concurrent.FastThreadLocalThread.threadLocalMap
io.netty.channel.nio.NioEventLoop.thread
{j.u.IdentityHashMap}
io.netty.resolver.DefaultAddressResolverGroup.resolvers
↖Java Static io.netty.resolver.DefaultAddressResolverGroup.INSTANCE

16,384K (1.9%): byte[]: 1 / 100% objects

 Random sample 
      byte[16777216]{0, 0, 0, 0, 0, 0, 0, -83, 3, 122, 11, -34, -77, -32, 5, 105, 90, 0, 0, 0, -104, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

io.netty.buffer.PoolChunk.memory io.netty.buffer.PoolChunkList.head
io.netty.buffer.PoolArena$HeapArena.qInit
io.netty.buffer.PoolArena[]
io.netty.buffer.PooledByteBufAllocator.heapArenas
org.apache.spark.network.client.TransportClientFactory.pooledAllocator
org.apache.spark.rpc.netty.NettyRpcEnv.clientFactory
↖Java Local@c0029b50 (org.apache.spark.rpc.netty.NettyRpcEnv)

14,157K (1.7%): byte[]: 31 / 100% objects

 Random sample 
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.f$9
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.cleanedF$1
org.apache.spark.rdd.MapPartitionsRDD.f
↖Java Local(org.apache.spark.rdd.MapPartitionsRDD) [@c571a210,@c8cc8560,@c8e31f18,@c8ef2878] ... and 33 more GC roots (37 thread(s))

9,255K (1.1%): byte[]: 1 / 100% objects

 Random sample 
      byte[9477381]{21, 4, 21, -128, -120, 39, 21, -32, -74, 23, 76, 21, -128, -15, 4, 21, 4, 0, 0, -128, -60, 19, 8, 121, -115, 0, 5, 1, 8, 9, ...}

java.nio.HeapByteBuffer.hb org.apache.parquet.hadoop.ParquetFileReader$WorkaroundChunk.byteBuf
{j.u.ArrayList}
↖Java Local(j.u.ArrayList) [@e2c25570,@e2c255f0] (1 thread(s))

7,752K (0.9%): byte[]: 1 / 100% objects

 Random sample 
      byte[7938689]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

java.nio.HeapByteBuffer.hb ↖Java Local@f0bec7c0 (java.nio.HeapByteBuffer)

7,712K (0.9%): byte[]: 2 / 0% objects

 Random sample 
      byte[7893124]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
      byte[4096]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

↖Unreachable
  All or some objects may start live as:

2,740K (0.3%): byte[]: 6 / 100% objects

 Random sample 
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
      byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer ↖Java Local(org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction) [@c9f25298,@cce80178,@cd208d10,@cd27c2c0] ... and 2 more GC roots (6 thread(s))
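
The Netty-rooted chains in this section each retain a whole 16MB arena chunk (byte[16777216] under io.netty.buffer.PoolChunk.memory). That size is not arbitrary: Netty's pooled allocator sizes each chunk as pageSize << maxOrder, 8192 << 11 = 16MB by default, and both values are standard allocator system properties. A sketch of the arithmetic (the defaults shown are Netty's; the smaller value in the comment is illustrative, not a tuned recommendation):

    public class NettyChunkSize {
        public static void main(String[] args) {
            // Netty's pooled allocator sizes each arena chunk as
            // pageSize << maxOrder; the defaults give the 16MB chunks
            // seen above under io.netty.buffer.PoolChunk.memory.
            int pageSize = Integer.getInteger("io.netty.allocator.pageSize", 8192);
            int maxOrder = Integer.getInteger("io.netty.allocator.maxOrder", 11);
            System.out.println("chunk size = " + (pageSize << maxOrder)); // 16777216
            // Starting the JVM with -Dio.netty.allocator.maxOrder=9 would give
            // 4MB chunks if the pooled heap arenas are oversized for the workload.
        }
    }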



  837,830        19,636K (2.3%)      120,817K (14.3%)       String
Reference chains
Expensive data fields

24,599K (2.9%): String: 131050 / 100% objects

 Random sample 
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452217"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451321"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452290"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450819"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451101"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451598"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451680"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450928"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452387"

 ↖java.net.URI.path
21,105K (2.5%): String: 131221 / 100% objects

 Random sample 
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451832"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451992"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451650"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451506"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451235"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451246"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451015"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452377"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452639"

 ↖j.u.Hashtable$Entry.value
18,034K (2.1%): String: 67837 / 100% objects

 Random sample 
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451876"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451438"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451446"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451009"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452010"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452198"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452422"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452289"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451501"

 ↖java.net.URI.string
17,601K (2.1%): String: 71161 / 100% objects

 Random sample 
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451875"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451291"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451448"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451009"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452010"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452627"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452289"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451502"

 ↖java.net.URI.schemeSpecificPart
6,874K (0.8%): String: 67688 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"

 ↖java.net.URI.authority
6,345K (0.8%): String: 67688 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"

 ↖java.net.URI.host
5,796K (0.7%): String: 67451 / 100% objects

 Random sample 
      "ss_sold_date_sk=2452105"
      "ss_sold_date_sk=2451391"
      "ss_sold_date_sk=2451480"
      "ss_sold_date_sk=2452403"
      "ss_sold_date_sk=2452041"
      "ss_sold_date_sk=2452470"
      "ss_sold_date_sk=2450970"
      "ss_sold_date_sk=2451854"
      "ss_sold_date_sk=2452329"
      "ss_sold_date_sk=2451594"
      "ss_sold_date_sk=2451236"
      "ss_sold_date_sk=2452012"
      "ss_sold_date_sk=2452194"
      "ss_sold_date_sk=2452475"
      "ss_sold_date_sk=2451430"
      "ss_sold_date_sk=2451901"
      "ss_sold_date_sk=2451749"
      "ss_sold_date_sk=2451512"
      "ss_sold_date_sk=2450822"
      "ss_sold_date_sk=2450988"

org.apache.hadoop.hive.ql.plan.PartitionDesc.baseFileName {j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,688K (0.4%): String: 67451 / 100% objects

 Random sample 
      "2451672"
      "2450816"
      "2452296"
      "2450816"
      "2451875"
      "2450816"
      "2450816"
      "2450816"
      "2450816"
      "2451507"
      "2450816"
      "2451679"
      "2451165"
      "2450838"
      "2450816"
      "2450816"
      "2452468"
      "2452509"

{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
3,173K (0.4%): String: 67691 / 100% objects

 Random sample 
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"

 ↖java.net.URI.scheme
2,614K (0.3%): String: 7204 / 100% objects

 Random sample 
      "true"
      "1,14,1,14"
      "3"
      "(ss_sold_date_sk is not null and ss_item_sk is not null)"
      "262144"
      "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 396]"
      "(ss_sold_date_sk is not null and ss_item_sk is not null)"
      "AQEAamF2YS51dGlsLkFycmF5TGlz9AECAQFvcmcuYXBhY2hlLmhhZG9vcC5oaXZlLnFsLnBsYW4uRXhwck5vZGVHZW5lcmljRnVuY0Rlc+MBAQABAQECb3JnLmFw ...[length 596]"
      "2"
      "15"
      "2147483647"
      "100"
      "65536"
      "604800s"

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.properties
1,501K (0.2%): String: 8266 / 100% objects

 Random sample 
      "1200000"
      "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 396]"
      "5"
      "(ss_sold_date_sk is not null and ss_item_sk is not null)"
      "false"
      "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 396]"
      "1,14,1,14"
      "(ss_sold_date_sk is not null and ss_item_sk is not null)"
      "AQEAamF2YS51dGlsLkFycmF5TGlz9AECAQFvcmcuYXBhY2hlLmhhZG9vcC5oaXZlLnFsLnBsYW4uRXhwck5vZGVHZW5lcmljRnVuY0Rlc+MBAQABAQECb3JnLmFw ...[length 596]"
      "31"
      "1,14,1,14"
      "104857600"
      "5000ms"
      "false"
      "1,14,1,14"
      "(ss_sold_date_sk is not null and ss_item_sk is not null)"
      "10"
      "SAME_MOUNTPOINT"

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.overlay
995K (0.1%): String: 6922 / 100% objects

 Random sample 
      "java.util.concurrent.ConcurrentNavigableMap"
      "scala.concurrent.Future$$anonfun$mapTo$1"
      "org.apache.avro.io.BinaryDecoder"
      "org.apache.spark.api.python.PythonBroadcast"
      "io.netty.util.internal.shaded.org.jctools.queues.atomic.BaseLinkedAtomicQueuePad1"
      "org.apache.spark.network.protocol.Message"
      "com.ctc.wstx.util.TextBuffer"
      "scala.collection.convert.WrapAsJava$"
      "org.apache.parquet.format.converter.ParquetMetadataConverter$SkipMetadataFilter"
      "com.google.protobuf.AbstractParser"
      "org.apache.hadoop.hive.ql.exec.FileSinkOperator$RecordWriter"
      "org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos$DisableErasureCodingPolicyRequestProto$1"
      "scala.collection.immutable.Set$EmptySet$"
      "org.apache.hadoop.ipc.CallerContext$CurrentCallerContextHolder"
      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetDatanodeReportResponseProto"
      "scala.sys.SystemProperties"
      "io.netty.channel.DefaultChannelPipeline$PendingHandlerAddedTask"
      "org.apache.spark.SSLOptions$$anonfun$7$$anonfun$apply$3"
      "com.fasterxml.jackson.databind.type.CollectionType"
      "org.apache.hadoop.hdfs.protocol.proto.AclProtos$RemoveAclResponseProtoOrBuilder"

{java.util.concurrent.ConcurrentHashMap}.keys sun.misc.Launcher$AppClassLoader.parallelLockMap
↖Java Static [org.apache.hive.com.esotericsoftware.reflectasm.AccessClassLoader.selfContextParentClassLoader], JNI Global [@c0056890]
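
The largest String root above is the static RetryingBlockFetcher.executorService, but the chains show the executor itself is not the owner: its DefaultThreadFactory references a ThreadGroup, the group enumerates every Thread in it, and those threads' ThreadLocal maps retain Hive MapWork plans full of partition-path strings. In other words, thread-local state on long-lived pool threads stays reachable until it is explicitly removed. A minimal sketch of the pattern (the ThreadLocal name is illustrative; the chain does not identify the declaring class):

    public class ThreadLocalRetention {
        // Stands in for the per-thread plan cache visible in the chains above,
        // whose values are org.apache.hadoop.hive.ql.plan.MapWork objects.
        private static final ThreadLocal<Object> PLAN_CACHE = new ThreadLocal<>();

        static void runTask(Object mapWork) {
            PLAN_CACHE.set(mapWork);
            try {
                // ... execute the task using the cached plan ...
            } finally {
                // Without this, a pooled thread keeps the value reachable via
                // Thread.threadLocals indefinitely: exactly the ThreadLocalMap ->
                // Thread[] -> ThreadGroup -> ThreadPoolExecutor path above.
                PLAN_CACHE.remove();
            }
        }
    }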


Full reference chains

17,427K (2.1%): String: 65609 / 100% objects

 Random sample 
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451832"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450888"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451992"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450945"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451650"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451125"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451263"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451423"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452068"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451506"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451876"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451235"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451246"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451015"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451399"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451661"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452377"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452212"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452639"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451980"

j.u.Hashtable$Entry.value j.u.Hashtable$Entry.{next}
j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService

10,005K (1.2%): String: 53364 / 100% objects

 Random sample 
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452217"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451685"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451321"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452286"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452290"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450904"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450914"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452179"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451604"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450819"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452598"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451101"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451598"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451680"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451834"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452327"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450928"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452353"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452387"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450902"

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
↖Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))

9,549K (1.1%): String: 35950 / 100% objects

 Random sample 
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451876"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451967"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451438"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452231"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451446"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451025"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450916"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451553"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452022"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451009"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452556"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452010"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452198"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452422"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451029"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451902"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452289"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452458"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451501"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451083"

java.net.URI.string org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService

8,987K (1.1%): String: 35950 / 100% objects

 Random sample 
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451875"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451837"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451291"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452231"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451448"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451026"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450916"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451553"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452022"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451009"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452556"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452010"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452627"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451029"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451903"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452289"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452458"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451502"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451084"

java.net.URI.schemeSpecificPart org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService

8,367K (1.0%): String: 31501 / 100% objects

 Random sample 
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450907"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451485"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451488"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451891"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452137"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452386"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451742"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452249"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452222"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451039"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451375"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451376"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452479"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451611"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451054"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450828"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451238"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450932"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452299"
      "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451148"

java.net.URI.string org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService

7,875K (0.9%): String: 31501 / 100% objects

 Random sample 
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450908"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451488"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452184"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451891"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451798"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452554"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451741"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452250"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452222"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451040"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451374"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451376"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452478"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451408"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451054"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450829"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451238"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450932"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452301"
      "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451148"

java.net.URI.schemeSpecificPart org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
6,693K (0.8%): String: 35698 / 100% objects

 Random sample 
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452009"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452375"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451377"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452316"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451792"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451038"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451020"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451818"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452107"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451082"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452069"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452097"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452037"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452524"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450885"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452523"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451682"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452372"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451943"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451130"

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
5,864K (0.7%): String: 31278 / 100% objects

 Random sample 
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450958"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452338"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451494"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451694"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451969"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451891"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451748"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452329"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451481"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451153"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451380"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451608"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452595"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451416"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451118"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450981"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451621"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451039"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451848"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451263"

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
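The same partition-path text recurs tens of thousands of times because every org.apache.hadoop.fs.Path key in pathToAliases and pathToPartitionInfo carries its own java.net.URI with private String fields. A small illustration of the usual remedy, deduplicating equal strings to one instance — String.intern() here for brevity; a user-level interner (e.g. a ConcurrentHashMap-backed pool) achieves the same without touching the JVM string table. Applied where PartitionDesc and Path objects are built, this would also collapse the baseFileName cluster further down:

    public class PathInternDemo {
        public static void main(String[] args) {
            String a = new String("/user/systest/tpcds_1000_decimal_parquet/"
                    + "store_sales/ss_sold_date_sk=2452009");
            String b = new String(a); // a second, equal copy, as in the dump

            System.out.println(a == b);                   // false: two separate payloads
            System.out.println(a.intern() == b.intern()); // true: one shared instance
        }
    }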
5,796K (0.7%): String: 67451 / 100% objects

 Random sample 
      "ss_sold_date_sk=2452105"
      "ss_sold_date_sk=2451391"
      "ss_sold_date_sk=2451480"
      "ss_sold_date_sk=2452403"
      "ss_sold_date_sk=2452041"
      "ss_sold_date_sk=2452470"
      "ss_sold_date_sk=2450970"
      "ss_sold_date_sk=2451854"
      "ss_sold_date_sk=2452329"
      "ss_sold_date_sk=2451594"
      "ss_sold_date_sk=2451236"
      "ss_sold_date_sk=2452012"
      "ss_sold_date_sk=2452194"
      "ss_sold_date_sk=2452475"
      "ss_sold_date_sk=2451430"
      "ss_sold_date_sk=2451901"
      "ss_sold_date_sk=2451749"
      "ss_sold_date_sk=2451512"
      "ss_sold_date_sk=2450822"
      "ss_sold_date_sk=2450988"

org.apache.hadoop.hive.ql.plan.PartitionDesc.baseFileName {j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,687K (0.4%): String: 67434 / 100% objects

 Random sample 
      "2451672"
      "2450980"
      "2452296"
      "2451239"
      "2451875"
      "2451953"
      "2450942"
      "2451699"
      "2452482"
      "2451507"
      "2450919"
      "2451679"
      "2451165"
      "2450838"
      "2451401"
      "2451290"
      "2452468"
      "2450989"
      "2452509"
      "2451791"

{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,671K (0.4%): String: 65561 / 100% objects

 Random sample 
      "42554862"
      "42258348"
      "42214034"
      "42283103"
      "95953611"
      "145303998"
      "42593445"
      "42373966"
      "145510428"
      "145422428"
      "42198629"
      "42161311"
      "42163986"
      "95940131"
      "145586677"
      "42303859"
      "96200732"
      "42789831"
      "95911736"
      "96367201"

j.u.Hashtable$Entry.value j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
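CopyOnFirstWriteProperties appears in this chain because Hive tries to share one read-only table of properties among partitions and only copies it when a partition writes. The per-partition numeric values sampled above are exactly the kind of entries that force private copies. A simplified sketch of the copy-on-first-write pattern — not the real org.apache.hadoop.hive.common.CopyOnFirstWriteProperties, just the idea:

    import java.util.Properties;

    /** Pattern sketch only. Many owners share 'shared' until one of them writes.
     *  (Only get/put are wired up here; getProperty() is not.) */
    class CowProperties extends Properties {
        private Properties shared; // read-only table shared across partitions

        CowProperties(Properties shared) { this.shared = shared; }

        @Override
        public synchronized Object get(Object key) {
            return shared != null ? shared.get(key) : super.get(key);
        }

        @Override
        public synchronized Object put(Object key, Object value) {
            if (shared != null) {       // first write: detach into a private copy
                super.putAll(shared);
                shared = null;
            }
            return super.put(key, value);
        }
    }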
3,651K (0.4%): String: 35950 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"

java.net.URI.authority org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,370K (0.4%): String: 35950 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"

java.net.URI.host org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,199K (0.4%): String: 31501 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"
      "vc0501.halxg.cloudera.com:8020"

java.net.URI.authority org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
2,953K (0.3%): String: 31501 / 100% objects

 Random sample 
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"
      "vc0501.halxg.cloudera.com"

java.net.URI.host org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
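The string/schemeSpecificPart/path/authority/host clusters above are all facets of the same objects: each org.apache.hadoop.fs.Path wraps a java.net.URI, and a URI keeps its full text plus each parsed (or lazily cached) component in its own String field. One partition path therefore costs six or more strings, multiplied by the ~31,000–36,000 Path keys per map. A small demonstration of which accessors back the fields named in these chains:

    import java.net.URI;

    public class UriFootprint {
        public static void main(String[] args) {
            URI u = URI.create("hdfs://vc0501.halxg.cloudera.com:8020/user/systest/"
                    + "tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450828");

            // Each value below is held in (or cached into) a distinct String field
            // of the URI -- the same fields the clusters above are grouped by.
            System.out.println(u.toString());              // URI.string
            System.out.println(u.getSchemeSpecificPart()); // URI.schemeSpecificPart (cached on first call)
            System.out.println(u.getPath());               // URI.path
            System.out.println(u.getAuthority());          // URI.authority
            System.out.println(u.getHost());               // URI.host
            System.out.println(u.getScheme());             // URI.scheme
        }
    }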
2,420K (0.3%): String: 5826 / 100% objects

 Random sample 
      "true"
      "LOG"
      "3"
      "true"
      "262144"
      "true"
      "simple"
      "0"
      "10000"
      "2"
      "true"
      "15"
      "2147483647"
      "100"
      "false"
      "false"
      "65536"
      "org.apache.hadoop.hive.metastore.CDHMetaStoreSchemaInfo"
      "604800s"
      "false"

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.properties
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,963K (0.2%): String: 10471 / 100% objects

 Random sample 
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450986"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451017"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451102"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451704"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451457"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451977"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451713"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452440"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451843"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451854"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452234"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451927"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452250"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451195"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451428"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452510"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452447"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452107"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452002"
      "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452115"

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.projectionPusher
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.recordReader
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.curReader
org.apache.spark.rdd.HadoopRDD$$anon$1.reader
org.apache.spark.rdd.HadoopRDD$$anon$1$$anonfun$2.$outer
org.apache.spark.TaskContext$$anon$1.f$1
{scala.collection.mutable.ArrayBuffer}
org.apache.spark.TaskContextImpl.onCompleteCallbacks
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
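This chain differs from the others: the paths are pinned not by the MapWork plan itself but by a ParquetRecordReaderWrapper that a Spark task registered as an on-complete callback, so the closure keeps the whole reader (including its ProjectionPusher map) reachable until the callback list is drained. A hypothetical Java analogue of that capture:

    import java.util.ArrayList;
    import java.util.List;

    public class CallbackCapture {
        interface ReaderLike { void close(); }

        // Stand-in for TaskContextImpl.onCompleteCallbacks.
        static final List<Runnable> ON_COMPLETE = new ArrayList<>();

        public static void main(String[] args) {
            ReaderLike reader = new ReaderLike() {
                final byte[] state = new byte[8 * 1024 * 1024]; // reader-held state
                @Override public void close() { /* release resources */ }
            };
            // Registering the callback captures 'reader', so everything the reader
            // references stays live until ON_COMPLETE is cleared -- the same shape
            // as the HadoopRDD completion-callback chain above.
            ON_COMPLETE.add(reader::close);
        }
    }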
1,685K (0.2%): String: 35950 / 100% objects

 Random sample 
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"

java.net.URI.scheme org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,588K (0.2%): String: 16536 / 100% objects

 Random sample 
      "org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
      "yarn-default.xml"
      "hdfs-default.xml"
      "programmatically"
      "because mapred.task.partition is deprecated"
      "core-default.xml"
      "hdfs-default.xml"
      "org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
      "yarn-default.xml"
      "programmatically"
      "programmatically"
      "hdfs-default.xml"
      "core-default.xml"
      "mapred-default.xml"
      "yarn-default.xml"
      "yarn-default.xml"
      "org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
      "hdfs-default.xml"
      "org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
      "core-default.xml"

String[] {java.util.concurrent.ConcurrentHashMap}.values
org.apache.hadoop.mapred.JobConf.updatingResource
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,476K (0.2%): String: 31501 / 100% objects

 Random sample 
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"
      "hdfs"

java.net.URI.scheme org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
995K (0.1%): String: 6922 / 100% objects

 Random sample 
      "java.util.concurrent.ConcurrentNavigableMap"
      "scala.concurrent.Future$$anonfun$mapTo$1"
      "org.apache.avro.io.BinaryDecoder"
      "org.apache.spark.api.python.PythonBroadcast"
      "io.netty.util.internal.shaded.org.jctools.queues.atomic.BaseLinkedAtomicQueuePad1"
      "org.apache.spark.network.protocol.Message"
      "com.ctc.wstx.util.TextBuffer"
      "scala.collection.convert.WrapAsJava$"
      "org.apache.parquet.format.converter.ParquetMetadataConverter$SkipMetadataFilter"
      "com.google.protobuf.AbstractParser"
      "org.apache.hadoop.hive.ql.exec.FileSinkOperator$RecordWriter"
      "org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos$DisableErasureCodingPolicyRequestProto$1"
      "scala.collection.immutable.Set$EmptySet$"
      "org.apache.hadoop.ipc.CallerContext$CurrentCallerContextHolder"
      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetDatanodeReportResponseProto"
      "scala.sys.SystemProperties"
      "io.netty.channel.DefaultChannelPipeline$PendingHandlerAddedTask"
      "org.apache.spark.SSLOptions$$anonfun$7$$anonfun$apply$3"
      "com.fasterxml.jackson.databind.type.CollectionType"
      "org.apache.hadoop.hdfs.protocol.proto.AclProtos$RemoveAclResponseProtoOrBuilder"

{java.util.concurrent.ConcurrentHashMap}.keys sun.misc.Launcher$AppClassLoader.parallelLockMap
↖Java Static [org.apache.hive.com.esotericsoftware.reflectasm.AccessClassLoader.selfContextParentClassLoader], JNI Global [@c0056890]
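These 6,922 strings are class names: parallel-capable class loaders allocate one lock object per class name on first load and keep it in a per-loader map that is never evicted, so the map grows monotonically with the number of classes loaded. A standalone analogue of that bookkeeping (the real map is a private field of java.lang.ClassLoader, not accessible directly):

    import java.util.concurrent.ConcurrentHashMap;

    /** Standalone analogue of the per-class-name lock map kept by
     *  parallel-capable class loaders (entries are never removed). */
    class LockMapDemo {
        private final ConcurrentHashMap<String, Object> parallelLockMap = new ConcurrentHashMap<>();

        Object getClassLoadingLock(String className) {
            Object newLock = new Object();
            Object lock = parallelLockMap.putIfAbsent(className, newLock);
            // The key (the class name String) stays in the map for the loader's
            // lifetime -- which is what the 6,922 strings above are.
            return lock != null ? lock : newLock;
        }
    }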
892K (0.1%): String: 6923 / 100% objects

 Random sample 
      "1200000"
      "true"
      "5"
      "0"
      "false"
      "300"
      "*"
      "true"
      "false"
      "31"
      "0.5"
      "104857600"
      "5000ms"
      "false"
      "3000"
      "mine"
      "10"
      "10"
      "SAME_MOUNTPOINT"
      "true"

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.overlay
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


 70,130 3,287K (0.4%) 72,143K (8.5%) j.u.Properties
Reference chains
Expensive data fields

57,085K (6.8%): j.u.Properties: 68081 / 100% objects

 Random sample 
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}

 ↖org.apache.hadoop.hive.ql.plan.TableDesc.properties
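All 68,081 Properties in this cluster describe the same table, default.store_sales, once per PartitionDesc, at a cost of ~57 MB. Since Hashtable (and therefore Properties) compares by content in equals()/hashCode(), identical copies could in principle be collapsed to one shared instance. A hypothetical canonicalizing cache sketching that fix — not Hive code, and the shared instance must then be treated as read-only:

    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ConcurrentHashMap;

    /** Hypothetical fix sketch: collapse equal table-level Properties
     *  to one shared, effectively-immutable instance. */
    final class PropertiesCache {
        private static final Map<Properties, Properties> CACHE = new ConcurrentHashMap<>();

        static Properties canonical(Properties p) {
            // Hashtable's equals()/hashCode() compare by content, so equal
            // copies map to the same entry. Callers must not mutate the result.
            Properties prev = CACHE.putIfAbsent(p, p);
            return prev != null ? prev : p;
        }
    }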
6,780K (0.8%): j.u.Properties: 86 / 100% objects

 Random sample 
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}

 ↖org.apache.hadoop.mapred.JobConf.overlay
6,780K (0.8%): j.u.Properties: 86 / 100% objects

 Random sample 
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      ... and 8 more j.u.Properties objects (size: 2137 or 2139) with identical visible contents

 ↖org.apache.hadoop.mapred.JobConf.properties
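
The chain above means each org.apache.hadoop.mapred.JobConf retains its own full copy of an essentially identical ~2,100-entry configuration table; the sample shows only two variants (2137 and 2139 entries). One possible mitigation, sketched below under the assumption that the code constructing these JobConf objects can be changed, is content-based interning, so that equal tables collapse into one canonical instance. PropertiesInterner is a hypothetical helper, not an existing Hadoop class:

    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical sketch: canonicalize java.util.Properties by content.
    // Properties inherits content-based equals()/hashCode() from Hashtable,
    // so equal configuration tables map to one shared instance. Interned
    // tables must be treated as read-only afterwards, since mutating a
    // map key would corrupt the cache.
    public final class PropertiesInterner {
        private static final Map<Properties, Properties> CACHE =
                new ConcurrentHashMap<>();

        private PropertiesInterner() {}

        public static Properties intern(Properties p) {
            Properties prev = CACHE.putIfAbsent(p, p);
            return prev != null ? prev : p;
        }
    }

With only two distinct table variants in the sample, routing every new table through such an interner would leave at most a handful of live copies instead of one per JobConf.
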
1,196K (0.1%): j.u.Properties: 1823 / 100% objects

 Random sample 
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "145397963"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452636"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      ... and 19 more j.u.Properties objects (size: 16) that differ only in their totalSize and location (ss_sold_date_sk partition) values

 ↖com.google.common.collect.MapMakerInternalMap$WeakEntry.referent
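
These 1823 partition descriptors sit behind weak references (Guava's MapMaker cache), so the objects themselves can be reclaimed under GC pressure; the avoidable cost is that their large values (serialization.ddl, columns, columns.types, the format class names) are identical strings repeated in every descriptor, which also contributes to the duplicate-string overhead reported in Section 7. A minimal sketch of value interning at descriptor-construction time follows; PartitionPropsBuilder is illustrative, not an existing Hive or Spark class:

    import com.google.common.collect.Interner;
    import com.google.common.collect.Interners;
    import java.util.Properties;

    // Hypothetical sketch: keep one canonical copy of each repeated
    // property value. A weak interner lets canonical strings that are
    // no longer referenced be garbage-collected.
    public final class PartitionPropsBuilder {
        private static final Interner<String> VALUES =
                Interners.newWeakInterner();

        public static Properties withInternedValues(Properties src) {
            Properties out = new Properties();
            // stringPropertyNames() also flattens any defaults into out.
            for (String key : src.stringPropertyNames()) {
                out.setProperty(key, VALUES.intern(src.getProperty(key)));
            }
            return out;
        }
    }
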


Full reference chains

56,911K (6.7%): j.u.Properties: 67451 / 100% objects

 Random sample 
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      ... and 17 more identical j.u.Properties objects (size: 19)
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}
      j.u.Properties(size: 19){("name", "default.store_sales"), ("EXTERNAL", "TRUE"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("spark.sql.sources.schema.part.0", "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"), ("spark.sql.sources.schema.numPartCols", "1"), ("partition_columns", "ss_sold_date_sk"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("bucket_count", "-1"), ("columns.comments", ""), ("transient_lastDdlTime", "1529629575"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("serialization.format", "1"), ("spark.sql.sources.schema.partCol.0", "ss_sold_date_sk"), ("partition_columns.types", "bigint"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("spark.sql.sources.schema.numParts", "1"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]")}

org.apache.hadoop.hive.ql.plan.TableDesc.properties
org.apache.hadoop.hive.ql.plan.PartitionDesc.tableDesc
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
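
The reference chain above explains the retention: each duplicated Properties belongs to a Hive MapWork that a task cached in a ThreadLocal, and the owning UninterruptibleThreads remain strongly reachable through the ThreadGroup referenced by the thread factory of the static RetryingBlockFetcher.executorService. A value parked in a ThreadLocal on a long-lived pool thread stays reachable until it is explicitly removed. The following is a minimal, self-contained sketch of that pattern with hypothetical names (this is not Spark or Hive code); the finally block shows the cleanup that keeps such values collectable.

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ThreadLocalRetention {
        // Hypothetical per-thread cache, analogous to the cached MapWork state above.
        private static final ThreadLocal<Properties> PER_THREAD_PROPS = new ThreadLocal<>();

        public static void main(String[] args) {
            // Long-lived pool, analogous to the static executorService in the chain above.
            ExecutorService pool = Executors.newFixedThreadPool(4);
            for (int i = 0; i < 100; i++) {
                pool.execute(() -> {
                    Properties props = new Properties();
                    props.setProperty("name", "default.store_sales"); // stand-in for the 19-entry table properties
                    PER_THREAD_PROPS.set(props);
                    try {
                        // ... task body uses props ...
                    } finally {
                        // Without this remove(), props stays strongly reachable from the
                        // worker thread's ThreadLocalMap for the lifetime of the pool.
                        PER_THREAD_PROPS.remove();
                    }
                });
            }
            pool.shutdown();
        }
    }

Because a pool worker outlives any single task, per-task state stashed in ThreadLocals must be cleared in a task-completion path, or it accumulates for as long as the pool (here, a static field) is alive.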
4,811K (0.6%): j.u.Properties: 61 / 100% objects

 Random sample 
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      ... and 19 more near-identical entries (size 2137 or 2139)

org.apache.hadoop.mapred.JobConf.properties
↖Java Local(org.apache.hadoop.mapred.JobConf) [@c56bfa00,@c8cc83d0,@c8e31e08,@c8ef2768] ... and 58 more GC roots (31 thread(s))
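
Each of the 61 JobConf instances above carries a Hadoop configuration of roughly 2,137-2,139 entries with essentially identical keys and values, one copy per task thread, together retaining 4,811K. Where the copies themselves cannot be shared, interning the backing strings through a common map reclaims most of the duplicate footprint. A minimal, hypothetical sketch follows (PropsDeduper and its intern map are illustrative, not a Hadoop or Spark API):

    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ConcurrentHashMap;

    public class PropsDeduper {
        // Shared canonicalization map: the first instance of each string wins.
        private static final Map<String, String> INTERN = new ConcurrentHashMap<>();

        private static String intern(String s) {
            String prev = INTERN.putIfAbsent(s, s);
            return prev != null ? prev : s;
        }

        // Returns a copy of props whose key and value strings are canonicalized,
        // so duplicate configurations share one set of underlying String objects.
        public static Properties dedup(Properties props) {
            Properties out = new Properties();
            for (String key : props.stringPropertyNames()) {
                out.setProperty(intern(key), intern(props.getProperty(key)));
            }
            return out;
        }

        public static void main(String[] args) {
            Properties a = new Properties();
            a.setProperty("hbase.rpc.timeout", new String("60000")); // force distinct String instances
            Properties b = new Properties();
            b.setProperty("hbase.rpc.timeout", new String("60000"));
            // After dedup, both copies reference the same value String:
            System.out.println(dedup(a).getProperty("hbase.rpc.timeout")
                    == dedup(b).getProperty("hbase.rpc.timeout")); // prints: true
        }
    }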

4,811K (0.6%): j.u.Properties: 61 / 100% objects

 Random sample 
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      ... and 10 more near-identical entries (size 2137 or 2139)
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2137){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}
      j.u.Properties(size: 2139){("yarn.app.mapreduce.client.job.retry-interval", "2000"), ("hive.metastore.hbase.aggr.stats.cache.entries", "10000"), ("hive.allow.udf.load.on.demand", "false"), ("hadoop.registry.zk.root", "/registry"), ("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"), ("hive.server2.map.fair.scheduler.queue", "true"), ("hadoop.security.kms.client.encrypted.key.cache.size", "500"), ("hbase.rpc.timeout", "60000"), ("hive.tez.task.scale.memory.reserve.fraction.max", "0.5"), ("mapreduce.shuffle.connection-keep-alive.timeout", "5"), ("mapreduce.reduce.markreset.buffer.percent", "0.0"), ("hive.fetch.task.conversion.threshold", "268435456"), ("fs.du.interval", "600000"), ("hive.map.aggr", "true"), ("dfs.data.transfer.client.tcpnodelay", "true"), ("hive.direct.sql.max.elements.in.clause", "1000"), ("hive.llap.remote.token.requires.signing", "true"), ("dfs.client.block.write.replace-datanode-on-failure.min-replication", "0"), ("hive.optimize.bucketmapjoin.sortedmerge", "false"), ("dfs.image.transfer.chunksize", "65536"), ("hive.llap.allow.permanent.fns", "true"), ("yarn.nodemanager.log.retain-seconds", "10800"), ("hbase.thrift.maxWorkerThreads", "1000"), ("rpc.engine.org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB", "org.apache.hadoop.ipc.ProtobufRpcEngine"), ("hive.optimize.sampling.orderby.number", "1000"), ("dfs.namenode.fs-limits.max-directory-items", "1048576"), ("hive.server2.idle.operation.timeout", "21600000"), ("hive.test.fail.heartbeater", "false"), ("yarn.nodemanager.runtime.linux.docker.capabilities", "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"), ("yarn.dispatcher.drain-events.timeout", "300000"), ...}

org.apache.hadoop.mapred.JobConf.overlay
↖Java Local(org.apache.hadoop.mapred.JobConf) [@c56bfa00,@c8cc83d0,@c8e31e08,@c8ef2768] ... and 58 more GC roots (31 thread(s))

981K (0.1%): j.u.Properties: 1495 / 100% objects

 Random sample 
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "145397963"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452636"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "145528502"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451859"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42417249"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451606"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "145503623"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451908"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42430702"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450951"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42470791"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451357"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42390971"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451661"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "95923145"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452151"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42307042"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451263"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "96095805"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452537"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "95773409"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451847"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "95901892"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451795"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "96124561"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452579"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "96320268"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451415"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42396214"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451573"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42199001"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452310"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42328828"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452457"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42222840"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451272"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "42575305"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451253"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}
      j.u.Properties(size: 16){("transient_lastDdlTime", "1529607398"), ("totalSize", "95910894"), ("file.outputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"), ("location", "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452130"), ("file.inputformat", "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"), ("serialization.lib", "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"), ("bucket_count", "-1"), ("partition_columns", "ss_sold_date_sk"), ("columns.comments", ""), ("partition_columns.types", "bigint"), ("serialization.format", "1"), ("serialization.ddl", "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"), ("columns", "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"), ("columns.types", "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]"), ("numFiles", "1"), ("name", "default.store_sales")}

com.google.common.collect.MapMakerInternalMap$WeakEntry.referent
Object[]
java.util.concurrent.atomic.AtomicReferenceArray.array
com.google.common.collect.MapMakerInternalMap$Segment.table
com.google.common.collect.MapMakerInternalMap$Segment[]
com.google.common.collect.MapMakerInternalMap.segments
com.google.common.collect.Interners$CustomInterner.map
↖Java Static org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.INTERNER
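
This chain ends at the static interner inside Hive's CopyOnFirstWriteProperties: a Guava MapMaker-backed map that canonicalizes Properties tables so that identical per-partition tables can share one copy, which is why 1,495 objects cost only 981K here. Since this pattern is the intended cure for exactly the duplication shown in the samples, a minimal sketch of the copy-on-first-write idea follows. It is an illustrative simplification, not Hive's actual class; CowProperties is a made-up name, and only the accessors exercised here are overridden.

    import java.util.Map;
    import java.util.Properties;

    // Simplified copy-on-first-write Properties: readers share one interned
    // table; a private copy is materialized only on the first mutation.
    public class CowProperties extends Properties {
        private Properties shared;   // interned read-only view; null once copied

        public CowProperties(Properties interned) { this.shared = interned; }

        @Override
        public synchronized Object get(Object key) {
            return shared != null ? shared.get(key) : super.get(key);
        }

        @Override
        public String getProperty(String key) {
            Properties s = shared;
            return s != null ? s.getProperty(key) : super.getProperty(key);
        }

        @Override
        public synchronized Object put(Object key, Object value) {
            if (shared != null) {
                Properties s = shared;
                shared = null;        // detach first, then copy entry by entry
                for (Map.Entry<Object, Object> e : s.entrySet()) {
                    super.put(e.getKey(), e.getValue());
                }
            }
            return super.put(key, value);
        }
    }

Note that whole-table interning only pays off when tables are equal by value: the JobConf overlays sampled further up come in at least two variants (size 2,137 vs. 2,139), so those would not all collapse to a single canonical copy.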


 2,769,527 86,547K (10.2%) 33,747K (4.0%) j.u.Hashtable$Entry
Reference chains
Expensive data fields

23,199K (2.7%): j.u.Hashtable$Entry: 742368 / 100% objects

 Random sample 
      j.u.Hashtable$Entry(hash : 192243322, key : "bucket_count", value : "-1", next : null)
      j.u.Hashtable$Entry(hash : -1277165689, key : "serialization.lib", value : "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe", next : null)
      j.u.Hashtable$Entry(hash : -1912184175, key : "numFiles", value : "1", next : null)
      j.u.Hashtable$Entry(hash : 906585013, key : "serialization.format", value : "1", next : j.u.Hashtable$Entry@d1aad280)
      j.u.Hashtable$Entry(hash : -450247896, key : "columns.types", value : "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]", next : null)
      j.u.Hashtable$Entry(hash : -79118107, key : "columns.comments", value : "", next : j.u.Hashtable$Entry@ceb5c4f8)
      j.u.Hashtable$Entry(hash : -450247896, key : "columns.types", value : "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]", next : null)
      j.u.Hashtable$Entry(hash : 988153646, key : "transient_lastDdlTime", value : "1529607398", next : null)
      j.u.Hashtable$Entry(hash : -1909185841, key : "file.inputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat", next : null)
      j.u.Hashtable$Entry(hash : -415250552, key : "partition_columns", value : "ss_sold_date_sk", next : null)
      j.u.Hashtable$Entry(hash : -1912184175, key : "numFiles", value : "1", next : null)
      j.u.Hashtable$Entry(hash : -1909185841, key : "file.inputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat", next : null)
      j.u.Hashtable$Entry(hash : -415250552, key : "partition_columns", value : "ss_sold_date_sk", next : null)
      j.u.Hashtable$Entry(hash : -1277165689, key : "serialization.lib", value : "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe", next : null)
      j.u.Hashtable$Entry(hash : 906585013, key : "serialization.format", value : "1", next : j.u.Hashtable$Entry@ceb603f0)
      j.u.Hashtable$Entry(hash : -1912184175, key : "numFiles", value : "1", next : null)
      j.u.Hashtable$Entry(hash : 906585013, key : "serialization.format", value : "1", next : j.u.Hashtable$Entry@cb3b5ac0)
      j.u.Hashtable$Entry(hash : -79118107, key : "columns.comments", value : "", next : j.u.Hashtable$Entry@d0ebbec8)
      j.u.Hashtable$Entry(hash : -577311387, key : "totalSize", value : "42195346", next : j.u.Hashtable$Entry@cc215950)
      j.u.Hashtable$Entry(hash : 988153646, key : "transient_lastDdlTime", value : "1529607398", next : null)

j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
10,545K (1.2%): j.u.Hashtable$Entry: 337462 / 100% objects

 Random sample 
      j.u.Hashtable$Entry(hash : -1277173522, key : "serialization.ddl", value : "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]", next : j.u.Hashtable$Entry@d6eaa558)
      j.u.Hashtable$Entry(hash : -2004287697, key : "MessageDigest.SHA-256", value : "sun.security.provider.SHA2$SHA256", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451261", next : null)
      j.u.Hashtable$Entry(hash : -1627234864, key : "Alg.Alias.KeyFactory.1.2.840.10040.4.1", value : "DSA", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 1116801907, key : "Alg.Alias.MessageDigest.2.16.840.1.101.3.4.2.1", value : "SHA-256", next : null)
      j.u.Hashtable$Entry(hash : 1124751301, key : "KeyPairGenerator.DSA ImplementedIn", value : "Software", next : null)
      j.u.Hashtable$Entry(hash : -1337775426, key : "KeyPairGenerator.DSA", value : "sun.security.provider.DSAKeyPairGenerator", next : null)
      j.u.Hashtable$Entry(hash : 746458700, key : "Alg.Alias.Signature.OID.2.16.840.1.101.3.4.3.2", value : "SHA256withDSA", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 1116801909, key : "Alg.Alias.MessageDigest.2.16.840.1.101.3.4.2.3", value : "SHA-512", next : j.u.Hashtable$Entry@c23f1f48)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452116", next : null)
      j.u.Hashtable$Entry(hash : -1585476758, key : "file.outputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat", next : j.u.Hashtable$Entry@c8af59d0)
      j.u.Hashtable$Entry(hash : -1277173522, key : "serialization.ddl", value : "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]", next : j.u.Hashtable$Entry@d48b5340)
      j.u.Hashtable$Entry(hash : 1352270575, key : "KeyFactory.DSA", value : "sun.security.provider.DSAKeyFactory", next : null)
      j.u.Hashtable$Entry(hash : 193016505, key : "Alg.Alias.MessageDigest.OID.2.16.840.1.101.3.4.2.3", value : "SHA-512", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452477", next : null)
      j.u.Hashtable$Entry(hash : 1839965079, key : "Alg.Alias.Signature.DSAWithSHA1", value : "SHA1withDSA", next : j.u.Hashtable$Entry@c23a9eb0)
      j.u.Hashtable$Entry(hash : -1552398701, key : "partition_columns.types", value : "bigint", next : null)
      j.u.Hashtable$Entry(hash : 1284827856, key : "Provider.id version", value : "1.8", next : null)

 ↖j.u.Hashtable$Entry.{next}
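
The shallow sizes here are pure per-entry bookkeeping and can be reproduced from the JVM geometry in Section 1 (4-byte pointers, 12-byte object headers): a j.u.Hashtable$Entry carries an int hash plus key, value and next references, i.e. 12 + 4 + 3 × 4 = 28 bytes, rounded up to 32 by 8-byte alignment. A quick check against the counts in this section:

    // Back-of-envelope check of the shallow sizes reported above, using the
    // pointer size and object header size from Section 1.
    public class HashtableEntryCost {
        public static void main(String[] args) {
            int raw = 12 + 4 + 3 * 4;        // header + int hash + 3 references
            int perEntry = (raw + 7) & ~7;   // 8-byte alignment -> 32 bytes
            System.out.println(742_368L * perEntry / 1024 + "K");   // 23199K, the chunk above
            System.out.println(2_769_527L * perEntry / 1024 + "K"); // ~86547K, the class total
        }
    }

At 32 bytes per key/value pair, the remedy is not a cheaper map but fewer maps: a large share of these entries belongs to the per-partition Properties tables shown in the samples.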


Full reference chains

23,186K (2.7%): j.u.Hashtable$Entry: 741961 / 100% objects

 Random sample 
      j.u.Hashtable$Entry(hash : 192243322, key : "bucket_count", value : "-1", next : null)
      j.u.Hashtable$Entry(hash : 988153646, key : "transient_lastDdlTime", value : "1529607398", next : null)
      j.u.Hashtable$Entry(hash : -1912184175, key : "numFiles", value : "1", next : null)
      j.u.Hashtable$Entry(hash : 192243322, key : "bucket_count", value : "-1", next : null)
      j.u.Hashtable$Entry(hash : -450247896, key : "columns.types", value : "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]", next : null)
      j.u.Hashtable$Entry(hash : 3373707, key : "name", value : "default.store_sales", next : null)
      j.u.Hashtable$Entry(hash : -450247896, key : "columns.types", value : "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]", next : null)
      j.u.Hashtable$Entry(hash : -79118107, key : "columns.comments", value : "", next : j.u.Hashtable$Entry@cd07f498)
      j.u.Hashtable$Entry(hash : -1912184175, key : "numFiles", value : "1", next : null)
      j.u.Hashtable$Entry(hash : -415250552, key : "partition_columns", value : "ss_sold_date_sk", next : null)
      j.u.Hashtable$Entry(hash : 906585013, key : "serialization.format", value : "1", next : j.u.Hashtable$Entry@cb942478)
      j.u.Hashtable$Entry(hash : -1909185841, key : "file.inputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat", next : null)
      j.u.Hashtable$Entry(hash : -415250552, key : "partition_columns", value : "ss_sold_date_sk", next : null)
      j.u.Hashtable$Entry(hash : -1277165689, key : "serialization.lib", value : "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe", next : null)
      j.u.Hashtable$Entry(hash : 3373707, key : "name", value : "default.store_sales", next : null)
      j.u.Hashtable$Entry(hash : -450247896, key : "columns.types", value : "bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:bigint:int:decimal(7,2):decimal(7,2):decimal(7,2):decimal(7,2):decim ...[length 222]", next : null)
      j.u.Hashtable$Entry(hash : 906585013, key : "serialization.format", value : "1", next : j.u.Hashtable$Entry@cb3b5ac0)
      j.u.Hashtable$Entry(hash : -577311387, key : "totalSize", value : "42145970", next : j.u.Hashtable$Entry@cbb93718)
      j.u.Hashtable$Entry(hash : -577311387, key : "totalSize", value : "42195346", next : j.u.Hashtable$Entry@cc215950)
      j.u.Hashtable$Entry(hash : 3373707, key : "name", value : "default.store_sales", next : null)

j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
10,539K (1.2%): j.u.Hashtable$Entry: 337255 / 100% objects

 Random sample 
      j.u.Hashtable$Entry(hash : -1277173522, key : "serialization.ddl", value : "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]", next : j.u.Hashtable$Entry@d6eaa558)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451493", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451261", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : -1585476758, key : "file.outputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat", next : j.u.Hashtable$Entry@c546ee60)
      j.u.Hashtable$Entry(hash : -1585476758, key : "file.outputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat", next : j.u.Hashtable$Entry@cf6b7d98)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451372", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451635", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452116", next : null)
      j.u.Hashtable$Entry(hash : -1585476758, key : "file.outputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat", next : j.u.Hashtable$Entry@c8af59d0)
      j.u.Hashtable$Entry(hash : -1277173522, key : "serialization.ddl", value : "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]", next : j.u.Hashtable$Entry@d48b5340)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : 1901043637, key : "location", value : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452477", next : null)
      j.u.Hashtable$Entry(hash : 949721053, key : "columns", value : "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]", next : null)
      j.u.Hashtable$Entry(hash : -1552398701, key : "partition_columns.types", value : "bigint", next : null)
      j.u.Hashtable$Entry(hash : -1585476758, key : "file.outputformat", value : "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat", next : j.u.Hashtable$Entry@cb86a140)

j.u.Hashtable$Entry.{next}
j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
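
These Hashtable$Entry objects are per-partition copies of the same table-level properties: the keys ("columns", "location", "serialization.ddl", ...) and many of the values are byte-for-byte identical across the ~1,800 partitions of default.store_sales. A minimal sketch of deduplicating the strings before they are stored follows; PropertyInterner is a hypothetical helper, not part of Hive, and a ConcurrentHashMap pool stands in for String.intern() so the dedup table's lifetime is independent of the JVM string table.

    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ConcurrentHashMap;

    final class PropertyInterner {
        // Hypothetical canonicalizing pool: one String instance per
        // distinct value, however many PartitionDesc objects reference it.
        private static final ConcurrentHashMap<String, String> POOL =
                new ConcurrentHashMap<>();

        static String intern(String s) {
            if (s == null) return null;
            String prev = POOL.putIfAbsent(s, s);
            return prev != null ? prev : s;
        }

        // Returns a copy of src whose keys and values are canonicalized;
        // Hive table properties are all strings in practice.
        static Properties interned(Properties src) {
            Properties dst = new Properties();
            for (Map.Entry<Object, Object> e : src.entrySet()) {
                dst.put(intern((String) e.getKey()), intern((String) e.getValue()));
            }
            return dst;
        }
    }
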


 68,245 3,732K (0.4%) 21,354K (2.5%)j.u.LinkedHashMap
Reference chains
Expensive data fields

11,599K (1.4%): j.u.LinkedHashMap: 67,488 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452106")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451559")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451953")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451691")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451352")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451508")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452475")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451953")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452237")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450816")}

 ↖org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
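Each of the 67,488 maps above carries a single partition-column pair yet pays for a full LinkedHashMap (header, table array, entry, and link fields). A minimal sketch of a cheaper representation follows, assuming the code that builds partSpec can be changed; PartSpecs is a hypothetical helper, not a Hive class. Since many partitions repeat the same pair (note how often ss_sold_date_sk=2450816 appears in the sample), identical specs can additionally share one canonical instance.

    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class PartSpecs {
        // Hypothetical cache keyed by "column=value". A singletonMap needs
        // only one small object per map instead of a LinkedHashMap with its
        // table array and entry objects.
        private static final ConcurrentHashMap<String, Map<String, String>> CACHE =
                new ConcurrentHashMap<>();

        static Map<String, String> of(String column, String value) {
            return CACHE.computeIfAbsent(column + '=' + value,
                    k -> Collections.singletonMap(column, value));
        }
    }
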
3,229K (0.4%): j.u.LinkedHashMap: 37 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d7468818, org.apache.hadoop.hive.ql.plan.PartitionDesc@d4e20920), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d77fa9f0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d4ffffc8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d76dc350, org.apache.hadoop.hive.ql.plan.PartitionDesc@d18bdcc8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d45191d0, org.apache.hadoop.hive.ql.plan.PartitionDesc@cb0a90b0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d77b61b8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d14ceef8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d4bb9580, org.apache.hadoop.hive.ql.plan.PartitionDesc@cb7dc348), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@e26e2fd8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6796b88), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5160958, org.apache.hadoop.hive.ql.plan.PartitionDesc@cb442038), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d4784498, org.apache.hadoop.hive.ql.plan.PartitionDesc@c8cc1dc0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d586a170, org.apache.hadoop.hive.ql.plan.PartitionDesc@cba205b0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d35d0a98, org.apache.hadoop.hive.ql.plan.PartitionDesc@c95f5538), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d75d0210, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6467300), ...}

 ↖org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
3,229K (0.4%): j.u.LinkedHashMap: 37 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c655bef8, org.apache.hadoop.hive.ql.plan.PartitionDesc@c84afe08), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c6be9630, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca63f850), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5fa9dc0, org.apache.hadoop.hive.ql.plan.PartitionDesc@cbe09e70), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c4c60d28, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca4aff10), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0f6baa8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d3ec1678), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d156c8e8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d2e852c8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d199db28, org.apache.hadoop.hive.ql.plan.PartitionDesc@d4ed4ea0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1468180, org.apache.hadoop.hive.ql.plan.PartitionDesc@d66ad1a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51d1470, org.apache.hadoop.hive.ql.plan.PartitionDesc@cab51e28), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1953908, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6f17918), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d47a1f60, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5314aa0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1a03918, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5e74d90), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1ae67e0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5652708), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c738fb38, org.apache.hadoop.hive.ql.plan.PartitionDesc@c92cc940), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d11bbff0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5ee0930), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c63028a0, org.apache.hadoop.hive.ql.plan.PartitionDesc@c9225c48), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5130f90, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6ca56f8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@caf14170, org.apache.hadoop.hive.ql.plan.PartitionDesc@ccc66b10), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c7aa1758, org.apache.hadoop.hive.ql.plan.PartitionDesc@c86f8e90), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c776d530, org.apache.hadoop.hive.ql.plan.PartitionDesc@cae013a8), ...}

org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,229K (0.4%): j.u.LinkedHashMap: 37 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51f0a00, j.u.ArrayList@c51f0e28), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c77fee80, j.u.ArrayList@c77ff2a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5fa9dc0, j.u.ArrayList@c5faa1e8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5af45e0, j.u.ArrayList@c5af4a08), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0f6baa8, j.u.ArrayList@d1077710), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d11bbff0, j.u.ArrayList@d11bc418), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d14d15b0, j.u.ArrayList@d14d19d8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0d456c8, j.u.ArrayList@d0d4ba18), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c718e470, j.u.ArrayList@c718e898), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1468180, j.u.ArrayList@d14685a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d199db28, j.u.ArrayList@d199df50), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1a03918, j.u.ArrayList@d1a9dd88), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1953908, j.u.ArrayList@d1953d30), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@cb5a67f8, j.u.ArrayList@cb5a6c20), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5130f90, j.u.ArrayList@d51313b8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51d1470, j.u.ArrayList@c51d1898), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d47a1f60, j.u.ArrayList@d47cfce0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5c32ea8, j.u.ArrayList@c5c332d0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c795e6d8, j.u.ArrayList@c797d9b8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c4c60d28, j.u.ArrayList@c4c61150), ...}

org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


Full reference chains

11,593K (1.4%): j.u.LinkedHashMap: 67,451 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452106")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450989")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451559")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452512")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451953")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452467")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451332")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451506")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452329")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451691")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451680")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451352")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451508")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452475")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451186")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451761")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2451953")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2450821")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452237")}
      j.u.LinkedHashMap(size: 1){("ss_sold_date_sk", "2452114")}

org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,229K (0.4%): j.u.LinkedHashMap: 37 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c655bef8, org.apache.hadoop.hive.ql.plan.PartitionDesc@c84afe08), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c6be9630, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca63f850), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5fa9dc0, org.apache.hadoop.hive.ql.plan.PartitionDesc@cbe09e70), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c4c60d28, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca4aff10), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0f6baa8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d3ec1678), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d156c8e8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d2e852c8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d199db28, org.apache.hadoop.hive.ql.plan.PartitionDesc@d4ed4ea0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1468180, org.apache.hadoop.hive.ql.plan.PartitionDesc@d66ad1a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51d1470, org.apache.hadoop.hive.ql.plan.PartitionDesc@cab51e28), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1953908, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6f17918), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d47a1f60, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5314aa0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1a03918, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5e74d90), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1ae67e0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5652708), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c738fb38, org.apache.hadoop.hive.ql.plan.PartitionDesc@c92cc940), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d11bbff0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d5ee0930), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c63028a0, org.apache.hadoop.hive.ql.plan.PartitionDesc@c9225c48), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5130f90, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6ca56f8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@caf14170, org.apache.hadoop.hive.ql.plan.PartitionDesc@ccc66b10), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c7aa1758, org.apache.hadoop.hive.ql.plan.PartitionDesc@c86f8e90), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c776d530, org.apache.hadoop.hive.ql.plan.PartitionDesc@cae013a8), ...}

org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,229K (0.4%): j.u.LinkedHashMap: 37 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51f0a00, j.u.ArrayList@c51f0e28), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c77fee80, j.u.ArrayList@c77ff2a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5fa9dc0, j.u.ArrayList@c5faa1e8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5af45e0, j.u.ArrayList@c5af4a08), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0f6baa8, j.u.ArrayList@d1077710), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d11bbff0, j.u.ArrayList@d11bc418), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d14d15b0, j.u.ArrayList@d14d19d8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d0d456c8, j.u.ArrayList@d0d4ba18), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c718e470, j.u.ArrayList@c718e898), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1468180, j.u.ArrayList@d14685a8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d199db28, j.u.ArrayList@d199df50), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1a03918, j.u.ArrayList@d1a9dd88), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d1953908, j.u.ArrayList@d1953d30), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@cb5a67f8, j.u.ArrayList@cb5a6c20), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5130f90, j.u.ArrayList@d51313b8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c51d1470, j.u.ArrayList@c51d1898), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d47a1f60, j.u.ArrayList@d47cfce0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c5c32ea8, j.u.ArrayList@c5c332d0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c795e6d8, j.u.ArrayList@c797d9b8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@c4c60d28, j.u.ArrayList@c4c61150), ...}

org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
2,705K (0.3%): j.u.LinkedHashMap: 31 / 100% objects

 Random sample 
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d7468818, org.apache.hadoop.hive.ql.plan.PartitionDesc@d4e20920), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d75a4450, org.apache.hadoop.hive.ql.plan.PartitionDesc@d59892b0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d76dc350, org.apache.hadoop.hive.ql.plan.PartitionDesc@d18bdcc8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d50c8438, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca012200), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d77b61b8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d14ceef8), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d77e7e60, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6d2f638), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d74246c0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d508e510), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d7712620, org.apache.hadoop.hive.ql.plan.PartitionDesc@d090a040), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d437b940, org.apache.hadoop.hive.ql.plan.PartitionDesc@caaa7198), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@e26e2fd8, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6796b88), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@e2325ab0, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6a14d28), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5160958, org.apache.hadoop.hive.ql.plan.PartitionDesc@cb442038), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d4784498, org.apache.hadoop.hive.ql.plan.PartitionDesc@c8cc1dc0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d586a170, org.apache.hadoop.hive.ql.plan.PartitionDesc@cba205b0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d5150e30, org.apache.hadoop.hive.ql.plan.PartitionDesc@cb019898), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d2d9dca0, org.apache.hadoop.hive.ql.plan.PartitionDesc@c8e647b0), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d35d0a98, org.apache.hadoop.hive.ql.plan.PartitionDesc@c95f5538), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d3349b78, org.apache.hadoop.hive.ql.plan.PartitionDesc@ca264a20), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d75d0210, org.apache.hadoop.hive.ql.plan.PartitionDesc@d6467300), ...}
      j.u.LinkedHashMap(size: 1823){(org.apache.hadoop.fs.Path@d2710858, org.apache.hadoop.hive.ql.plan.PartitionDesc@cbf1a018), ...}

org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
↖Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))
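
The full chains above are the same picture from several angles: roughly 31-37 threads each retain a private copy of an 1,823-entry map (pathToPartitionInfo / pathToAliases), so one logical table layout is held dozens of times. A minimal sketch of sharing one effectively-immutable copy follows, assuming the maps are read-only once built; SharedPlanMaps is a hypothetical wrapper, not Spark or Hive API.

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    final class SharedPlanMaps<K, V> {
        private final Map<K, V> shared;

        SharedPlanMaps(Map<K, V> source) {
            // One defensive copy at construction time; afterwards every
            // worker thread reads the same unmodifiable view instead of
            // cloning the 1,823-entry map into its own ThreadLocal.
            this.shared = Collections.unmodifiableMap(new LinkedHashMap<>(source));
        }

        Map<K, V> view() {
            return shared; // final field: safe to publish across threads
        }
    }
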



 135,145 10,558K (1.2%) 10,558K (1.2%)java.net.URI
Reference chains
Expensive data fields

10,557K (1.2%): java.net.URI: 135,141 / 100% objects

 Random sample 
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718", hash : 665245764, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943", hash : 665217983, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519", hash : 665243843, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019", hash : 665268829, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044", hash : 665268917, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427", hash : 665242911, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975", hash : 665218078, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107", hash : 665239966, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735", hash : 665245823, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735")

 ↖org.apache.hadoop.fs.Path.uri
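
Every java.net.URI above repeats one partition-directory string in its path, schemeSpecificPart, and string fields, and each org.apache.hadoop.fs.Path wraps its own URI. Since Path is immutable, duplicates can be collapsed by routing construction through a canonical pool. A minimal sketch follows, assuming hadoop-common is on the classpath and callers can be redirected; PathPool is a hypothetical helper.

    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.hadoop.fs.Path;

    final class PathPool {
        // Hypothetical canonical pool: one Path (and therefore one URI and
        // one backing string) per distinct path, instead of one per
        // PartitionDesc reference.
        private static final ConcurrentHashMap<String, Path> POOL =
                new ConcurrentHashMap<>();

        static Path get(String pathString) {
            return POOL.computeIfAbsent(pathString, Path::new);
        }
    }
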


Full reference chains

4,415K (0.5%): java.net.URI: 56,513 / 100% objects

 Random sample 
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718", hash : 665245764, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451718")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450939", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450939", hash : 665217958, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450939")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943", hash : 665217983, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450943")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451387", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451387", hash : 665242136, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451387")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519", hash : 665243843, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451519")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452473", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452473", hash : 665272853, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452473")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452509", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452509", hash : 665273603, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452509")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451775", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451775", hash : 665245947, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451775")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451231", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451231", hash : 665241014, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451231")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019", hash : 665268829, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452019")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451371", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451371", hash : 665242099, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451371")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044", hash : 665268917, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452044")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427", hash : 665242911, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451427")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975", hash : 665218078, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450975")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452430", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452430", hash : 665272726, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452430")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451512", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451512", hash : 665243836, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451512")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107", hash : 665239966, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451107")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451478", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451478", hash : 665243067, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451478")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735", hash : 665245823, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451735")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451700", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451700", hash : 665245725, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451700")

org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
↖Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))

2,808K (0.3%): java.net.URI: 35,950 / 100% objects

 Random sample 
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451204", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451204", hash : 2003202410, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451204", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451204")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452298", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452298", hash : -1409992400, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452298", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452298")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450881", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450881", hash : -1208400434, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450881", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450881")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201", hash : -1691722610, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452201")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451329", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451329", hash : -468174362, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451329", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451329")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451305", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451305", hash : 1779485672, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451305", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451305")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452525", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452525", hash : -408624983, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452525", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452525")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451411", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451411", hash : -1932683380, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451411", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451411")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451991", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451991", hash : 191578735, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451991", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451991")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451276", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451276", hash : 206605551, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451276", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451276")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451436", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451436", hash : 145643689, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451436", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451436")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451159", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451159", hash : -1368826517, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451159", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451159")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451323", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451323", hash : -654293204, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451323", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451323")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451750", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451750", hash : 1118543246, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451750", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451750")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452396", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452396", hash : -1726768559, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452396", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452396")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450991", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450991", hash : -501522962, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450991", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450991")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451620", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451620", hash : -1511562260, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451620", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451620")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450949", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450949", hash : -766467295, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450949", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450949")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450902", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450902", hash : -535094716, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450902", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450902")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315", hash : -1553867607, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315")

org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
2,461K (0.3%): java.net.URI: 31,501 / 100% objects

 Random sample 
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452385", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452385", hash : 1575564913, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452385", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452385")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451377", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451377", hash : -17111187, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451377", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451377")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452288", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452288", hash : 1923360879, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452288", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452288")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452545", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452545", hash : 1514603051, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452545", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452545")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451188", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451188", hash : 1484995727, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451188", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451188")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450842", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450842", hash : -728869399, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450842", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450842")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451908", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451908", hash : 344125823, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451908", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451908")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451616", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451616", hash : 2007909861, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451616", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451616")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452493", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452493", hash : -2074564525, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452493", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452493")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452193", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452193", hash : -1310354890, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452193", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452193")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451076", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451076", hash : 716078641, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451076", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451076")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452510", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452510", hash : -1525338035, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452510", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452510")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451382", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451382", hash : 789403795, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451382", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451382")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451844", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451844", hash : 26271912, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451844", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451844")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451995", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451995", hash : 315657963, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451995", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451995")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452582", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452582", hash : 973032402, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452582", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452582")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450954", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450954", hash : 40047687, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450954", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450954")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450834", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450834", hash : -1628443802, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450834", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450834")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452293", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452293", hash : -1565091435, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452293", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452293")
      java.net.URI(scheme : "hdfs", fragment : null, authority : "vc0501.halxg.cloudera.com:8020", userInfo : null, host : "vc0501.halxg.cloudera.com", port : 8020, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452036", query : null, schemeSpecificPart : "//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452036", hash : 1857691566, decodedUserInfo : null, decodedAuthority : null, decodedPath : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452036", decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452036")

org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
854K (0.1%): java.net.URI: 10938 / 100% objects

 Random sample 
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451874", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451874", hash : 665246907, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451874")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451521", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451521", hash : 665243866, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451521")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452267", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452267", hash : 665270904, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452267")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451997", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451997", hash : 665247933, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451997")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452470", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452470", hash : 665272850, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452470")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451975", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451975", hash : 665247869, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451975")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451579", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451579", hash : 665244029, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451579")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452169", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452169", hash : 665269945, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452169")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451136", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451136", hash : 665240058, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451136")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451143", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451143", hash : 665240086, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451143")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450914", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450914", hash : 665217891, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450914")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451301", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451301", hash : 665241882, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451301")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451280", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451280", hash : 665241168, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451280")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452436", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452436", hash : 665272732, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452436")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452542", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452542", hash : 665273720, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452542")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451116", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451116", hash : 665239996, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451116")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451177", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451177", hash : 665240183, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451177")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452403", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452403", hash : 665272636, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452403")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450952", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450952", hash : 665218013, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450952")
      java.net.URI(scheme : null, fragment : null, authority : null, userInfo : null, host : null, port : -1, path : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452502", query : null, schemeSpecificPart : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452502", hash : 665273596, decodedUserInfo : null, decodedAuthority : null, decodedPath : null, decodedQuery : null, decodedFragment : null, decodedSchemeSpecificPart : null, string : "/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452502")

org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.projectionPusher
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.recordReader
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.curReader
org.apache.spark.rdd.HadoopRDD$$anon$1.reader
org.apache.spark.rdd.HadoopRDD$$anon$1$$anonfun$2.$outer
org.apache.spark.TaskContext$$anon$1.f$1
{scala.collection.mutable.ArrayBuffer}
org.apache.spark.TaskContextImpl.onCompleteCallbacks
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
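
The three chains above pin tens of thousands of near-identical java.net.URI objects in executor memory: every sampled instance points at the same .../tpcds_1000_decimal_parquet/store_sales table and differs only in its ss_sold_date_sk= suffix, yet each one stores the text several times over (path, schemeSpecificPart, string, plus decoded variants). Since java.net.URI is immutable, Path keys that resolve to the same location could safely share a single instance. A minimal sketch of such an interner, assuming the code that constructs these Path keys can be changed (the class below is illustrative, not part of Hadoop):

    import java.net.URI;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Canonicalizing cache: callers that would otherwise construct a new
    // URI per map entry ask the cache instead, so all threads holding the
    // same partition paths share one URI object per distinct location.
    final class UriInterner {
        private static final Map<String, URI> CACHE = new ConcurrentHashMap<>();

        static URI intern(String uriString) {
            return CACHE.computeIfAbsent(uriString, URI::create);
        }
    }

An unbounded cache is acceptable here only because the set of distinct partition paths is fixed for a given table layout; a bounded or weak-valued map would be the safer general-purpose choice.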


 391 24K (< 0.1%) 9,362K (1.1%) java.util.concurrent.ConcurrentHashMap
Reference chains
Expensive data fields

8,404K (1.0%): java.util.concurrent.ConcurrentHashMap: 86 / 100% objects

 Random sample 
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}

 ↖org.apache.hadoop.mapred.JobConf.updatingResource


Full reference chains

6,032K (0.7%): java.util.concurrent.ConcurrentHashMap: 61 / 100% objects

 Random sample 
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}

org.apache.hadoop.mapred.JobConf.updatingResource
↖Java Local(org.apache.hadoop.mapred.JobConf) [@c56bfa00,@c8cc83d0,@c8e31e08,@c8ef2768] ... and 58 more GC roots (31 thread(s))

593K (< 0.1%): java.util.concurrent.ConcurrentHashMap: 6 / 100% objects

 Random sample 
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}

org.apache.hadoop.mapred.JobConf.updatingResource
org.apache.spark.rdd.HadoopRDD$$anon$1.jobConf
org.apache.spark.rdd.HadoopRDD$$anon$1$$anonfun$2.$outer
org.apache.spark.TaskContext$$anon$1.f$1
{scala.collection.mutable.ArrayBuffer}
org.apache.spark.TaskContextImpl.onCompleteCallbacks
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
593K (< 0.1%): java.util.concurrent.ConcurrentHashMap: 6 / 100% objects

 Random sample 
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}
      java.util.concurrent.ConcurrentHashMap(size: 0){("hadoop.security.group.mapping.ldap.posix.attr.uid.name", String[](1){"core-default.xml"})}

org.apache.hadoop.mapred.JobConf.updatingResource
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.jobConf
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.recordReader
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.curReader
org.apache.spark.rdd.HadoopRDD$$anon$1.reader
org.apache.spark.rdd.HadoopRDD$$anon$1$$anonfun$2.$outer
org.apache.spark.TaskContext$$anon$1.f$1
{scala.collection.mutable.ArrayBuffer}
org.apache.spark.TaskContextImpl.onCompleteCallbacks
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
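
Every map in this section is Hadoop Configuration's internal record of which resource file set each property (hence the "core-default.xml" values above), and every JobConf copy carries its own. The sampled maps are indistinguishable from one another, which suggests a base configuration is being copied once per task or per record reader. A hedged sketch of reusing one JobConf per base Configuration instead of copying it each time (the cache is an illustrative assumption, not Spark's or Hive's actual code, and is only safe if all sharers treat the JobConf as read-only):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;

    // new JobConf(conf) copies every property, including the
    // property-to-source map seen in the samples, so creating one per
    // reader multiplies that overhead. Configuration has no value
    // equality, so this keys on object identity, which is enough when
    // the same base Configuration object is passed in repeatedly.
    final class JobConfCache {
        private static final Map<Configuration, JobConf> CACHE = new ConcurrentHashMap<>();

        static JobConf forBase(Configuration base) {
            return CACHE.computeIfAbsent(base, JobConf::new);
        }
    }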


 137,752 23,460K (2.8%) 7,382K (0.9%) j.u.Hashtable$Entry[]
Reference chains
Expensive data fields

7,381K (0.9%): j.u.Hashtable$Entry[]: 67488 / 100% objects

 Random sample 
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d6ae3cc0, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cd9336a8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c71ba638, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d1b82d68, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d5a22988, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cade1968, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@ca879938, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c9c563e8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d16f0218, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d3a44348, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c7c21858, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d3199b68, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c95a0c60, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cb414888, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c67b5b88, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@ceb60550, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cf6bdda8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cdb40278, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cd1c8668, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cda23a78, ...}

 ↖org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table


Full reference chains

7,377K (0.9%): j.u.Hashtable$Entry[]: 67451 / 100% objects

 Random sample 
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d6ae3cc0, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cccecf30, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c71ba638, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cbf11d38, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d5a22988, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cba385a8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@ccc4e2a0, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d5316920, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cb5205f0, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d3a44348, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c9c33ed8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d3199b68, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c95a0c60, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cb414888, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d2c04338, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c9037458, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cf6bdda8, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@d425c430, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@cd1c8668, ...}
      j.u.Hashtable$Entry[](23){j.u.Hashtable$Entry@c783d6e8, ...}

org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
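
Note that this chain, like the URI and JobConf chains above, ends in the ThreadLocalMap of a long-lived UninterruptibleThread that stays reachable through the thread group shared with the RetryingBlockFetcher executor's thread factory. Pool threads never die, so whatever their ThreadLocals reference is retained indefinitely. The usual fix is to clear the ThreadLocal when the task finishes; a minimal sketch, assuming the ThreadLocal in question is under your control (names illustrative, not Spark's actual code):

    // Per-task state parked in a ThreadLocal outlives the task unless it
    // is explicitly removed, because the pool thread, and therefore its
    // ThreadLocalMap, lives on.
    final class TaskLocal {
        static final ThreadLocal<Object> STATE = new ThreadLocal<>();

        static void runTask(Runnable body) {
            try {
                body.run();
            } finally {
                STATE.remove();   // let this task's state become collectible
            }
        }
    }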


 71,641 1,679K (0.2%) 5,557K (0.7%) j.u.ArrayList
Reference chains
Expensive data fields

5,269K (0.6%): j.u.ArrayList: 67451 / 100% objects

 Random sample 
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}

{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


Full reference chains

5,269K (0.6%): j.u.ArrayList: 67451 / 100% objects

 Random sample 
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}
      j.u.ArrayList(size: 1){"store_sales"}

{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
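
Nearly all of the 5,269K here comes from 67,451 single-element ArrayLists that each hold the alias "store_sales". An ArrayList costs an object header, size fields and a backing Object[] even for one element; if these alias lists are read-only once MapWork is built, one shared immutable list per distinct alias removes almost all of that. A hedged sketch (the interner is an illustrative assumption, not Hive's code):

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Interner for single-element alias lists. Collections.singletonList
    // returns an immutable list, so an accidental write fails fast
    // instead of corrupting state shared across map entries.
    final class AliasLists {
        private static final Map<String, List<String>> CACHE = new ConcurrentHashMap<>();

        static List<String> of(String alias) {
            return CACHE.computeIfAbsent(alias, Collections::singletonList);
        }
    }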


 67,488 3,690K (0.4%) 3,690K (0.4%) org.apache.hadoop.hive.common.CopyOnFirstWriteProperties
Reference chains
Expensive data fields

3,690K (0.4%): org.apache.hadoop.hive.common.CopyOnFirstWriteProperties: 67488 / 100% objects

 Random sample 
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c61c87f8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf3112f0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@ceb60300, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d6382a60, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7504688)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c710f9e0, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c7e79508, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c5b60538)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d112d4b8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d12208a8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d1525350, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c8a0c730, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c95897d8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c6ce97e0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d18518a8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c9c29930, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7cf53e8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d3a3d718, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c77d9980)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cfb5eac0, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf310160)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d407a200, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cd933458, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cba0c730, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c6983fe0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cda23828, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cbf09a10, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c8cb7e40)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cdb40028, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cd807178)

 ↖org.apache.hadoop.hive.ql.plan.PartitionDesc.properties


Full reference chains

3,688K (0.4%): org.apache.hadoop.hive.common.CopyOnFirstWriteProperties: 67451 / 100% objects

 Random sample 
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c61c87f8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf3112f0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cc316ba0, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7bd2968)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d6382a60, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7504688)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cd1c3c98, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c503d5f8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c7e79508, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c5b60538)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cf6b9f30, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf32a4e8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cc3b8b20, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7e035d8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c8362ca8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c77d9980)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cfaa2970, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf2a4b30)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c95897d8, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c6ce97e0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d318bf90, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c8126bd0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c9c29930, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c7cf53e8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d3a3d718, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c77d9980)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cfb5eac0, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf310160)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@ca22cc10, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c8cbfb58)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d411ade0, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c687a8a8)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cba0c730, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c6983fe0)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@c8e5cb08, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c8cbfb58)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@cbf09a10, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@c8cb7e40)
      org.apache.hadoop.hive.common.CopyOnFirstWriteProperties(table : j.u.Hashtable$Entry[](23)@d5f3a588, count : 16, threshold : 17, loadFactor : 0.75, modCount : 17, keySet : null, entrySet : null, values : null, defaults : null, interned : j.u.Properties@cf309958)

org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
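
The samples above show each CopyOnFirstWriteProperties instance holding both a populated private table (count : 16) and a reference to a shared interned Properties, which suggests the sharing the class was built for is not in effect for these 67 thousand partition descriptors. For reference, a minimal sketch of the copy-on-first-write idea the class name describes (illustrative, not Hive's actual source):

    import java.util.Properties;

    // Reads delegate to a shared, read-only table until the first write
    // forces a private copy. As long as no write happens, thousands of
    // instances can share a single underlying table.
    class CowProps extends Properties {
        private Properties shared;   // null once a private copy exists

        CowProps(Properties shared) { this.shared = shared; }

        @Override
        public synchronized Object setProperty(String key, String value) {
            if (shared != null) {     // first write: materialize the copy
                super.putAll(shared);
                shared = null;
            }
            return super.setProperty(key, value);
        }

        @Override
        public String getProperty(String key) {
            Properties s = shared;
            return s != null ? s.getProperty(key) : super.getProperty(key);
        }
    }

If the writes that trigger the copy are themselves redundant (for example, re-setting a property to the value it already holds), suppressing them would restore the sharing.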


 67,488 2,636K (0.3%) 2,636K (0.3%) org.apache.hadoop.hive.ql.plan.PartitionDesc
Reference chains
Expensive data fields

2,634K (0.3%): org.apache.hadoop.hive.ql.plan.PartitionDesc: 67451 / 100% objects

 Random sample 
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d6bd73c8, partSpec : j.u.LinkedHashMap@d6bd7748, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d6bd7830, baseFileName : "ss_sold_date_sk=2451493", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cc735320, partSpec : j.u.LinkedHashMap@cc7356a0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cc735788, baseFileName : "ss_sold_date_sk=2451000", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c71362c0, partSpec : j.u.LinkedHashMap@c7136708, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c7136428, baseFileName : "ss_sold_date_sk=2452412", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c8822ad8, partSpec : j.u.LinkedHashMap@c883bd78, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c883be60, baseFileName : "ss_sold_date_sk=2451007", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d5bd36f0, partSpec : j.u.LinkedHashMap@d5bd3a70, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d5bd3b58, baseFileName : "ss_sold_date_sk=2451617", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c9891038, partSpec : j.u.LinkedHashMap@c99134e0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c99135c8, baseFileName : "ss_sold_date_sk=2452340", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@ccb62100, partSpec : j.u.LinkedHashMap@ccb80e98, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@ccb687a8, baseFileName : "ss_sold_date_sk=2451350", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d5a7ff90, partSpec : j.u.LinkedHashMap@d5a80310, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d5a803f8, baseFileName : "ss_sold_date_sk=2451744", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c6d2b8c0, partSpec : j.u.LinkedHashMap@c6d2bc40, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c6d2bd28, baseFileName : "ss_sold_date_sk=2451922", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d4885c88, partSpec : j.u.LinkedHashMap@d4886008, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d48860f0, baseFileName : "ss_sold_date_sk=2451471", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c4a20150, partSpec : j.u.LinkedHashMap@c4a205a0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c4a202c0, baseFileName : "ss_sold_date_sk=2452256", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d26a5330, partSpec : j.u.LinkedHashMap@d26a56b0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d26a5798, baseFileName : "ss_sold_date_sk=2452437", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cb1177e8, partSpec : j.u.LinkedHashMap@cb117b68, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cb117c50, baseFileName : "ss_sold_date_sk=2450850", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c6aa3358, partSpec : j.u.LinkedHashMap@c6aa36d8, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c6aa37c0, baseFileName : "ss_sold_date_sk=2451989", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d31f53f0, partSpec : j.u.LinkedHashMap@d31f5770, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d31f5858, baseFileName : "ss_sold_date_sk=2451674", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cae001d0, partSpec : j.u.LinkedHashMap@cae00550, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cae00638, baseFileName : "ss_sold_date_sk=2451120", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cb4cb8c0, partSpec : j.u.LinkedHashMap@cb4cbc40, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cb4cbd28, baseFileName : "ss_sold_date_sk=2451228", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d4c27040, partSpec : j.u.LinkedHashMap@d4c273c0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d4c274a8, baseFileName : "ss_sold_date_sk=2452061", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c5380ae0, partSpec : j.u.LinkedHashMap@c538e110, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c538de30, baseFileName : "ss_sold_date_sk=2452156", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c9246138, partSpec : j.u.LinkedHashMap@c9246588, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c92462a8, baseFileName : "ss_sold_date_sk=2451886", vectorPartitionDesc : null)

{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


Full reference chains

2,634K (0.3%): org.apache.hadoop.hive.ql.plan.PartitionDesc: 67,451 / 100% objects

 Random sample 
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d6bd73c8, partSpec : j.u.LinkedHashMap@d6bd7748, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d6bd7830, baseFileName : "ss_sold_date_sk=2451493", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cc735320, partSpec : j.u.LinkedHashMap@cc7356a0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cc735788, baseFileName : "ss_sold_date_sk=2451000", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c71362c0, partSpec : j.u.LinkedHashMap@c7136708, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c7136428, baseFileName : "ss_sold_date_sk=2452412", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c8822ad8, partSpec : j.u.LinkedHashMap@c883bd78, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c883be60, baseFileName : "ss_sold_date_sk=2451007", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d5bd36f0, partSpec : j.u.LinkedHashMap@d5bd3a70, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d5bd3b58, baseFileName : "ss_sold_date_sk=2451617", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c9891038, partSpec : j.u.LinkedHashMap@c99134e0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c99135c8, baseFileName : "ss_sold_date_sk=2452340", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@ccb62100, partSpec : j.u.LinkedHashMap@ccb80e98, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@ccb687a8, baseFileName : "ss_sold_date_sk=2451350", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d5a7ff90, partSpec : j.u.LinkedHashMap@d5a80310, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d5a803f8, baseFileName : "ss_sold_date_sk=2451744", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c6d2b8c0, partSpec : j.u.LinkedHashMap@c6d2bc40, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c6d2bd28, baseFileName : "ss_sold_date_sk=2451922", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d4885c88, partSpec : j.u.LinkedHashMap@d4886008, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d48860f0, baseFileName : "ss_sold_date_sk=2451471", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c4a20150, partSpec : j.u.LinkedHashMap@c4a205a0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c4a202c0, baseFileName : "ss_sold_date_sk=2452256", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d26a5330, partSpec : j.u.LinkedHashMap@d26a56b0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d26a5798, baseFileName : "ss_sold_date_sk=2452437", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cb1177e8, partSpec : j.u.LinkedHashMap@cb117b68, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cb117c50, baseFileName : "ss_sold_date_sk=2450850", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c6aa3358, partSpec : j.u.LinkedHashMap@c6aa36d8, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c6aa37c0, baseFileName : "ss_sold_date_sk=2451989", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d31f53f0, partSpec : j.u.LinkedHashMap@d31f5770, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d31f5858, baseFileName : "ss_sold_date_sk=2451674", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cae001d0, partSpec : j.u.LinkedHashMap@cae00550, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cae00638, baseFileName : "ss_sold_date_sk=2451120", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@cb4cb8c0, partSpec : j.u.LinkedHashMap@cb4cbc40, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@cb4cbd28, baseFileName : "ss_sold_date_sk=2451228", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@d4c27040, partSpec : j.u.LinkedHashMap@d4c273c0, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@d4c274a8, baseFileName : "ss_sold_date_sk=2452061", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c5380ae0, partSpec : j.u.LinkedHashMap@c538e110, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c538de30, baseFileName : "ss_sold_date_sk=2452156", vectorPartitionDesc : null)
      org.apache.hadoop.hive.ql.plan.PartitionDesc(tableDesc : org.apache.hadoop.hive.ql.plan.TableDesc@c9246138, partSpec : j.u.LinkedHashMap@c9246588, inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : org.apache.hadoop.hive.common.CopyOnFirstWriteProperties@c92462a8, baseFileName : "ss_sold_date_sk=2451886", vectorPartitionDesc : null)

{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


 68,081 2,127K (0.3%) 2,127K (0.3%) org.apache.hadoop.hive.ql.plan.TableDesc
Reference chains
Expensive data fields

2,109K (0.2%): org.apache.hadoop.hive.ql.plan.TableDesc: 67,488 / 100% objects

 Random sample 
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c61c6ec0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@ceb5ff48, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d637c2d0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c710e330, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c7e738b0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d112d038, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d121f178, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d1524ed0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c8940d18, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c958af40, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d1851460, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c9c2a6b0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d3a365b8, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cfb5e640, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d4079d80, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cd9330a0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cba0cbb0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cda23470, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cbf0a790, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cdb3fc70, jobProperties : null)

 ↖org.apache.hadoop.hive.ql.plan.PartitionDesc.tableDesc


Full reference chains

2,107K (0.2%): org.apache.hadoop.hive.ql.plan.TableDesc: 67,451 / 100% objects

 Random sample 
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c61c6ec0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cc317920, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d637c2d0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cd1cca28, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c7e738b0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cf6bb5b0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cc3b9368, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c8361a40, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cfa9f078, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c958af40, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d3186e48, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c9c2a6b0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d3a365b8, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cfb5e640, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@ca22eb90, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d4115d40, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cba0cbb0, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@c8e5ab90, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@cbf0a790, jobProperties : null)
      org.apache.hadoop.hive.ql.plan.TableDesc(inputFileFormatClass : j.l.Class@cd80c260, outputFileFormatClass : j.l.Class@cd817e90, properties : j.u.Properties@d5f31af0, jobProperties : null)

org.apache.hadoop.hive.ql.plan.PartitionDesc.tableDesc
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


 135,141 2,111K (0.2%) 2,111K (0.2%) org.apache.hadoop.fs.Path
Reference chains
Expensive data fields

1,053K (0.1%): org.apache.hadoop.fs.Path: 67,451 / 100% objects

 Random sample 
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2a15008)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d780fd00)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d227d388)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d39ee7c8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2087ba0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d4e8c890)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d27c2290)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2fdb850)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d4729ae8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2156288)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d5d24508)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2615160)
      org.apache.hadoop.fs.Path(uri : java.net.URI@e20e6bd0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d73317e0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d3483b00)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2328f60)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d4866128)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d735b5f8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d55e5bb0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d34a0830)

{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo


Full reference chains

883K (0.1%): org.apache.hadoop.fs.Path: 56,513 / 100% objects

 Random sample 
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2a15008)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d719ccf0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d227d388)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d710de78)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2087ba0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@e1ebcca8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d58fbe58)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2961a40)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d72f6440)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2156288)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d761c080)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2615160)
      org.apache.hadoop.fs.Path(uri : java.net.URI@e20e6bd0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d73317e0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d40611d0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d72d1928)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d4866128)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d2d2fad8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d55e5bb0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d31ba588)

{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
↖Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))

561K (< 0.1%): org.apache.hadoop.fs.Path: 35,950 / 100% objects

 Random sample 
      org.apache.hadoop.fs.Path(uri : java.net.URI@d38fbb28)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cb1b8308)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d0cdc698)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d12eed98)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d011c730)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d3d63350)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cacffe48)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d01fb360)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c4d9e950)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c7314b38)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d0fa4590)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c507d070)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d10ea960)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d328e728)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cebe3658)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cbe608e0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c5837788)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c6a503f0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cbe9f820)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c79e5f08)

{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService

492K (< 0.1%): org.apache.hadoop.fs.Path: 31,501 / 100% objects

 Random sample 
      org.apache.hadoop.fs.Path(uri : java.net.URI@d0d64f98)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c4e9b648)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d076e880)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d06f8ae8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d03c96d0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d1658608)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d414b650)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d0949ce8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c56266a8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c80b2d88)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d1586a70)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c57303b8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d079c648)
      org.apache.hadoop.fs.Path(uri : java.net.URI@d102bd10)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cec2c4c0)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c9bcc278)
      org.apache.hadoop.fs.Path(uri : java.net.URI@caf8b358)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c66f4948)
      org.apache.hadoop.fs.Path(uri : java.net.URI@c9c092e8)
      org.apache.hadoop.fs.Path(uri : java.net.URI@cf23ba38)

{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


 37 1,184K (0.1%) 1,184K (0.1%) org.apache.spark.unsafe.memory.MemoryBlock[]
Reference chains
Expensive data fields

1,184K (0.1%): org.apache.spark.unsafe.memory.MemoryBlock[]: 37 / 100% objects

 Random sample 
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}

org.apache.spark.memory.TaskMemoryManager.pageTable
org.apache.spark.TaskContextImpl.taskMemoryManager
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


Full reference chains

1,184K (0.1%): org.apache.spark.unsafe.memory.MemoryBlock[]: 37 / 100% objects

 Random sample 
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}
      org.apache.spark.unsafe.memory.MemoryBlock[](8192){null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, ...}

org.apache.spark.memory.TaskMemoryManager.pageTable
org.apache.spark.TaskContextImpl.taskMemoryManager
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
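
Note: the MemoryBlock[](8192) arrays above are Spark TaskMemoryManager page tables, which are allocated at a fixed 8,192 slots per task (Spark reserves 13 bits of each in-memory address for the page number). With this dump's 4-byte references, one such array costs 8,192 × 4 B plus the array header, i.e. about 32K, so the 37 tables account for the full 1,184K shown here even though nearly every slot in the samples is null.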






3. Where Memory Goes, by GC Root (Check for Memory Leaks)

251,868K (29.8%) Object tree for GC root(s) Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
                                  1. String self 17,427K (2.1%), 65,609 object(s)
                                1. j.u.Hashtable$Entry self 10,539K (1.2%), 337,255 object(s)
                              1. j.u.Hashtable$Entry self 23,186K (2.7%), 741,961 object(s)
                                1. String self 3,671K (0.4%), 65,561 object(s)
                                1. String self 5K (< 0.1%), 86 object(s)
                            1. j.u.Hashtable$Entry[] self 7,377K (0.9%), 67,451 object(s)
                          1. org.apache.hadoop.hive.common.CopyOnFirstWriteProperties self 3,688K (0.4%), 67,451 object(s)
                            1. j.u.Properties self 56,911K (6.7%), 67,451 object(s)
                            2. ... 3 more references together retaining 364K (< 0.1%)
                          1. org.apache.hadoop.hive.ql.plan.TableDesc self 2,107K (0.2%), 67,451 object(s)
                          1. j.u.LinkedHashMap self 11,593K (1.4%), 67,451 object(s)
                            1. String self 3,687K (0.4%), 67,434 object(s)
                            1. String self 2K (< 0.1%), 32 object(s)
                          1. String self 5,796K (0.7%), 67,451 object(s)
                        1. org.apache.hadoop.hive.ql.plan.PartitionDesc self 2,634K (0.3%), 67,451 object(s)
                            1. String self 8,367K (1.0%), 31,501 object(s)
                            1. String self 7,875K (0.9%), 31,501 object(s)
                            1. String self 5,864K (0.7%), 31,278 object(s)
                            1. String self 3,199K (0.4%), 31,501 object(s)
                            1. String self 2,953K (0.3%), 31,501 object(s)
                          1. java.net.URI self 2,461K (0.3%), 31,501 object(s)
                            1. String self 1,476K (0.2%), 31,501 object(s)
                            1. String self 41K (< 0.1%), 223 object(s)
                        1. org.apache.hadoop.fs.Path self 492K (< 0.1%), 31,501 object(s)
                      1. j.u.LinkedHashMap self 3,229K (0.4%), 37 object(s)
                        1. j.u.LinkedHashMap$LinkedEntrySet self 592b (< 0.1%), 37 object(s)
                            1. String self 9,549K (1.1%), 35,950 object(s)
                            1. String self 8,987K (1.1%), 35,950 object(s)
                            1. String self 6,693K (0.8%), 35,698 object(s)
                            1. String self 3,651K (0.4%), 35,950 object(s)
                            1. String self 3,370K (0.4%), 35,950 object(s)
                          1. java.net.URI self 2,808K (0.3%), 35,950 object(s)
                            1. String self 1,685K (0.2%), 35,950 object(s)
                            1. String self 47K (< 0.1%), 252 object(s)
                        1. org.apache.hadoop.fs.Path self 561K (< 0.1%), 35,950 object(s)
                        1. j.u.ArrayList self 5,269K (0.6%), 67,451 object(s)
                          1. String self 2K (< 0.1%), 37 object(s)
                      1. j.u.LinkedHashMap self 3,229K (0.4%), 37 object(s)
                      2. ... 2 more references together retaining 688b (< 0.1%)
                            1. ... 12 references together retaining 1,233K (0.1%)
                          1. j.u.ArrayList self 5K (< 0.1%), 74 object(s)
                        1. ... 10 more references together retaining 76K (< 0.1%)
                      1. j.u.LinkedList self 2K (< 0.1%), 37 object(s)
                    1. ... 13 more references together retaining 522K (< 0.1%)
                        1. String self 2,420K (0.3%), 5,826 object(s)
                      1. ... 2 more references together retaining 1,113K (0.1%)
                          1. String self 1,588K (0.2%), 16,536 object(s)
                        1. String[] self 299K (< 0.1%), 12,762 object(s)
                      1. ... 2 more references together retaining 719K (< 0.1%)
                        1. String self 892K (0.1%), 6,923 object(s)
                      1. ... 2 more references together retaining 1,117K (0.1%)
                    1. ... 4 more references together retaining 3K (< 0.1%)
                                          1. String self 1,963K (0.2%), 10,471 object(s)
                                        1. java.net.URI self 854K (0.1%), 10,938 object(s)
                                        2. ... 2 more references together retaining 87K (< 0.1%)
                                      1. org.apache.hadoop.fs.Path self 170K (< 0.1%), 10,938 object(s)
                                    1. j.u.LinkedHashMap self 523K (< 0.1%), 6 object(s)
                                  1. org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher self 144b (< 0.1%), 6 object(s)
                                  1. ... 6 references together retaining 1,554K (0.2%)
                                1. ... 2 more references together retaining 576b (< 0.1%)
                              1. ... 2 more references together retaining 672b (< 0.1%)
                            1. ... 4 more references together retaining 38K (< 0.1%)
                            1. ... 7 references together retaining 1,571K (0.2%)
                          1. ... 7 more references together retaining 245K (< 0.1%)
                        1. ... 4 more references together retaining 82K (< 0.1%)
                      1. org.apache.spark.TaskContext$$anon$1 self 1K (< 0.1%), 78 object(s)
                    1. scala.collection.mutable.ArrayBuffer self 3K (< 0.1%), 37 object(s)
                      1. org.apache.spark.unsafe.memory.MemoryBlock[] self 1,184K (0.1%), 37 object(s)
                    1. ... 4 more references together retaining 713K (< 0.1%)
                  1. ... 41 more references together retaining 694K (< 0.1%)
                1. ... 2 more references together retaining 22K (< 0.1%)
              1. j.l.ThreadLocal$ThreadLocalMap$Entry[] self 6K (< 0.1%), 37 object(s)
            1. j.l.ThreadLocal$ThreadLocalMap self 888b (< 0.1%), 37 object(s)
          1. ... 44 more references together retaining 1,337K (0.2%)
        1. j.l.Thread[] self 528b (< 0.1%), 1 object(s)
      1. ... 3 more references together retaining 96b (< 0.1%)
    1. ... 3 more references together retaining 128b (< 0.1%)
  1. ... 7 more references together retaining 280b (< 0.1%)
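
Note: the tree above is the classic "ThreadLocal on a pooled thread" retention pattern. Per-thread Hive state (MapWork plans with their PartitionDesc/TableDesc/Properties graphs) is cached in ThreadLocals; the owning UninterruptibleThreads belong to a pool that stays reachable through the static RetryingBlockFetcher.executorService field, so the cached state survives long after the tasks that created it finish. A minimal sketch of the pattern and its usual mitigation follows; the class, PLAN_CACHE, and runTask names are hypothetical illustrations, not the actual Hive/Spark code:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ThreadLocalRetentionSketch {
        // Long-lived pool, analogous to the static executorService root above:
        // its worker threads (and their ThreadLocal maps) live as long as the JVM.
        private static final ExecutorService POOL = Executors.newFixedThreadPool(4);

        // Per-thread cache, analogous to the ThreadLocal holding MapWork here.
        // Anything put in it stays reachable from Thread.threadLocals until removed.
        private static final ThreadLocal<Map<String, byte[]>> PLAN_CACHE =
                ThreadLocal.withInitial(HashMap::new);

        static void runTask(String planId) {
            POOL.execute(() -> {
                PLAN_CACHE.get().put(planId, new byte[1024]); // stand-in for a parsed plan
                try {
                    // ... task uses the cached plan ...
                } finally {
                    PLAN_CACHE.remove(); // mitigation: clear per-thread state when done
                }
            });
        }

        public static void main(String[] args) {
            runTask("query-1");
            POOL.shutdown();
        }
    }

Without the remove() in the finally block, every pooled thread keeps its last cache contents reachable indefinitely, which is exactly the shape of the 251,868K retained by this root.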
236,338K (28.0%) Object tree for GC root(s) Java Local(org.apache.parquet.hadoop.ParquetFileReader) [@e27511e0,@e2ba8838,@e2c05ca0,@e2c05e68] ... and 26 more GC roots (30 thread(s))
                1. byte[] self 235,392K (27.9%), 29 object(s)
              1. java.nio.HeapByteBuffer self 1K (< 0.1%), 29 object(s)
            1. org.apache.parquet.bytes.BytesInput$ByteBufferBytesInput self 696b (< 0.1%), 29 object(s)
          1. org.apache.parquet.column.page.DictionaryPage self 928b (< 0.1%), 29 object(s)
          1. ... 2 references together retaining 898K (0.1%)
        1. ... 2 more references together retaining 928b (< 0.1%)
      1. ... 2 more references together retaining 4K (< 0.1%)
    1. org.apache.parquet.hadoop.ColumnChunkPageReadStore self 720b (< 0.1%), 30 object(s)
  1. ... 10 more references together retaining 38K (< 0.1%)
183,025K (21.7%) Object tree for GC root(s) Java Local(java.nio.HeapByteBuffer) [@e2c258a0,@e6da5218,@e6dd49f8,@e6ddbef8] ... and 23 more GC roots (27 thread(s))
    1. byte[] self 183,023K (21.7%), 27 object(s)
  1. java.nio.HeapByteBuffer self 1K (< 0.1%), 27 object(s)
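
Note: these 27 backing arrays average roughly 6,779K (about 6.6M) apiece, one per java.nio.HeapByteBuffer held as a stack-local on a task thread. Together with the 235,392K of ParquetFileReader-rooted buffers above, per-thread read buffers account for roughly half the heap, so their total footprint scales directly with the number of concurrently running tasks.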
67,029K (7.9%) Object tree for GC root(s) Java Static io.netty.resolver.DefaultAddressResolverGroup.INSTANCE
                      1. byte[] self 65,536K (7.8%), 4 object(s)
                    1. ... 4 more references together retaining 64K (< 0.1%)
                  1. ... 2 more references together retaining 160b (< 0.1%)
                1. ... 16 more references together retaining 99K (< 0.1%)
              1. ... 11 more references together retaining 155K (< 0.1%)
            1. Object[] self 576b (< 0.1%), 4 object(s)
          1. ... 3 more references together retaining 3K (< 0.1%)
        1. ... 8 more references together retaining 18K (< 0.1%)
            1. ... 13 references together retaining 925K (0.1%)
          1. io.netty.util.concurrent.EventExecutor[] self 144b (< 0.1%), 3 object(s)
        1. ... 5 more references together retaining 1K (< 0.1%)
      1. ... 18 more references together retaining 223K (< 0.1%)
    1. j.u.IdentityHashMap self 312b (< 0.1%), 1 object(s)
  1. io.netty.resolver.DefaultAddressResolverGroup self 16b (< 0.1%), 1 object(s)
18,600K (2.2%) Object tree for GC root(s) Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))
          1. String self 10,005K (1.2%), 53,364 object(s)
        1. java.net.URI self 4,415K (0.5%), 56,513 object(s)
        2. ... 2 more references together retaining 590K (< 0.1%)
      1. org.apache.hadoop.fs.Path self 883K (0.1%), 56,513 object(s)
    1. j.u.LinkedHashMap self 2,705K (0.3%), 31 object(s)
  1. org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher self 744b (< 0.1%), 31 object(s)
16,472K (1.9%) Object tree for GC root(s) Java Local@c0029b50 (org.apache.spark.rpc.netty.NettyRpcEnv)
                1. byte[] self 16,384K (1.9%), 1 object(s)
              1. ... 4 more references together retaining 16K (< 0.1%)
            1. ... 2 more references together retaining 320b (< 0.1%)
          1. ... 15 more references together retaining 24K (< 0.1%)
        1. io.netty.buffer.PoolArena[] self 48b (< 0.1%), 1 object(s)
      1. ... 7 more references together retaining 41K (< 0.1%)
    1. ... 4 more references together retaining 288b (< 0.1%)
  1. ... 12 more references together retaining 6K (< 0.1%)
15,892K (1.9%) Object tree for GC root(s) Java Local(org.apache.hadoop.mapred.JobConf) [@c56bfa00,@c8cc83d0,@c8e31e08,@c8ef2768] ... and 58 more GC roots (31 thread(s))
    1. java.util.concurrent.ConcurrentHashMap self 6,032K (0.7%), 61 object(s)
    2. ... 2 more references together retaining 16K (< 0.1%)
    1. j.u.Properties self 4,811K (0.6%), 61 object(s)
      1. String self 107K (< 0.1%), 245 object(s)
    1. j.u.Properties self 4,811K (0.6%), 61 object(s)
      1. String self 100K (< 0.1%), 239 object(s)
  1. ... 4 more references together retaining 11K (< 0.1%)
14,225K (1.7%) Object tree for GC root(s) Java Local(org.apache.spark.rdd.MapPartitionsRDD) [@c571a210,@c8cc8560,@c8e31f18,@c8ef2878] ... and 33 more GC roots (37 thread(s))
          1. byte[] self 14,157K (1.7%), 31 object(s)
        1. ... 4 more references together retaining 61K (< 0.1%)
      1. org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1 self 496b (< 0.1%), 31 object(s)
    1. org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23 self 496b (< 0.1%), 31 object(s)
  1. ... 4 more references together retaining 5K (< 0.1%)
9,255K (1.1%) Object tree for GC root(s) Java Local(j.u.ArrayList) [@e2c25570,@e2c255f0] (1 thread(s))
        1. byte[] self 9,255K (1.1%), 1 object(s)
      1. java.nio.HeapByteBuffer self 48b (< 0.1%), 1 object(s)
    1. ... 3 more references together retaining 40b (< 0.1%)
  1. j.u.ArrayList self 128b (< 0.1%), 2 object(s)
9,027K (1.1%) Unreachable (garbage) objects
  1. byte[] self 7,712K (0.9%), 2 object(s)
  2. ... 119 more references together retaining 1,315K (0.2%)
7,752K (0.9%) Object tree for GC root(s) Java Local@f0bec7c0 (java.nio.HeapByteBuffer)
    1. byte[] self 7,752K (0.9%), 1 object(s)
  1. java.nio.HeapByteBuffer self 48b (< 0.1%), 1 object(s)
2,988K (0.4%) Object tree for GC root(s) Java Local(org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction) [@c9f25298,@cce80178,@cd208d10,@cd27c2c0] ... and 2 more GC roots (6 thread(s))
    1. byte[] self 2,740K (0.3%), 6 object(s)
  1. ... 4 more references together retaining 248K (< 0.1%)
2,573K (0.3%) Object tree for GC root(s) Java Static [org.apache.hive.com.esotericsoftware.reflectasm.AccessClassLoader.selfContextParentClassLoader], JNI Global [@c0056890]
      1. String self 995K (0.1%), 6,922 object(s)
    1. ... 2 more references together retaining 388K (< 0.1%)
  1. ... 14 more references together retaining 1,189K (0.1%)
1,915K (0.2%) Object tree for GC root(s) Java Static org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.INTERNER
                1. j.u.Properties self 981K (0.1%), 1,495 object(s)
                2. ... 3 more references together retaining 496K (< 0.1%)
              1. ... 3 more references together retaining 421K (< 0.1%)
            1. Object[] self 16K (< 0.1%), 4 object(s)
          1. java.util.concurrent.atomic.AtomicReferenceArray self 64b (< 0.1%), 4 object(s)
        1. ... 4 more references together retaining 768b (< 0.1%)
      1. com.google.common.collect.MapMakerInternalMap$Segment[] self 32b (< 0.1%), 1 object(s)
    1. ... 7 more references together retaining 104b (< 0.1%)
  1. com.google.common.collect.Interners$CustomInterner self 16b (< 0.1%), 1 object(s)
1,365K (0.2%) Object tree for GC root(s) Java Static org.apache.parquet.hadoop.CodecFactory.CODEC_BY_NAME
        1. ... 7 references together retaining 1,365K (0.2%)
      1. org.apache.parquet.hadoop.codec.SnappyCodec self 24b (< 0.1%), 1 object(s)
    1. ... 2 more references together retaining 160b (< 0.1%)
  1. j.u.Collections$SynchronizedMap self 32b (< 0.1%), 1 object(s)
... and 7,411 more GC roots together retaining 5,857K (0.7%)




4. Live vs Garbage Objects

     Live   Garbage  Total 
 Objects 6,040,101 20,755 6,060,856
 Bytes 835,881K (98.9%) 9,027K (1.1%) 844,909K (100.0%)

Details:

  #instances garbage   Shallow size garbage   #instances live   Shallow size live   Class name 
 47 7,713K (0.9%) 2,571 537,166K (63.6%) byte[]
 4,228 377K (< 0.1%) 833,252 100,852K (11.9%) char[]
 0 0b (0.0%) 2,769,527 86,547K (10.2%) j.u.Hashtable$Entry
 69 4K (< 0.1%) 137,683 23,456K (2.8%) j.u.Hashtable$Entry[]
 4,200 98K (< 0.1%) 833,630 19,538K (2.3%) String
 222 8K (< 0.1%) 270,569 10,569K (1.3%) j.u.LinkedHashMap$Entry
 0 0b (0.0%) 135,145 10,558K (1.2%) java.net.URI
 177 10K (< 0.1%) 69,384 7,236K (0.9%) j.u.HashMap$Node[]
 0 0b (0.0%) 207,012 6,469K (0.8%) java.util.concurrent.ConcurrentHashMap$Node
 3,357 180K (< 0.1%) 72,282 4,284K (0.5%) Object[]
 154 8K (< 0.1%) 68,091 3,723K (0.4%) j.u.LinkedHashMap
 0 0b (0.0%) 67,488 3,690K (0.4%) org.apache.hadoop.hive.common.CopyOnFirstWriteProperties
 0 0b (0.0%) 70,130 3,287K (0.4%) j.u.Properties
 0 0b (0.0%) 234 2,868K (0.3%) java.util.concurrent.ConcurrentHashMap$Node[]
 0 0b (0.0%) 67,488 2,636K (0.3%) org.apache.hadoop.hive.ql.plan.PartitionDesc
 0 0b (0.0%) 68,081 2,127K (0.3%) org.apache.hadoop.hive.ql.plan.TableDesc
 0 0b (0.0%) 135,141 2,111K (0.2%) org.apache.hadoop.fs.Path
 263 6K (< 0.1%) 71,378 1,672K (0.2%) j.u.ArrayList
 0 0b (0.0%) 37 1,184K (0.1%) org.apache.spark.unsafe.memory.MemoryBlock[]
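
With only 1.1% of the heap unreachable, the problem is live retention rather than allocation churn: the dominant live classes in this table (Hashtable$Entry, CopyOnFirstWriteProperties, PartitionDesc, TableDesc, Path) are exactly the per-thread Hive plan graphs traced in Sections 2 and 3.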




5. Fixed per-object overhead

  Fixed per-object overhead  Total overhead 
 12b 71,025K (8.4%)

Details:

  #instances   (Average) object size   Total overhead per class   Class name 
 2,769,527 32b 32,455K (3.8%) j.u.Hashtable$Entry
 837,830 24b 9,818K (1.2%) String
 837,480 123b 9,814K (1.2%) char[]
 270,791 40b 3,173K (0.4%) j.u.LinkedHashMap$Entry
 207,012 32b 2,425K (0.3%) java.util.concurrent.ConcurrentHashMap$Node
 137,752 174b 1,614K (0.2%) j.u.Hashtable$Entry[]
 135,145 80b 1,583K (0.2%) java.net.URI
 135,141 16b 1,583K (0.2%) org.apache.hadoop.fs.Path
 75,639 60b 886K (0.1%) Object[]
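
Each class's line is its instance count multiplied by the fixed 12b per-object header: for example, 2,769,527 j.u.Hashtable$Entry × 12 B = 33,234,324 B ≈ 32,455K, matching the first row. Since the Hashtable$Entry instances belong almost entirely to the duplicated Properties tables traced in Section 3, collapsing those tables would recover this header overhead as well.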




6. Memory Retained by Objects Awaiting Finalization:  no significant overhead 

... and 1 more GC root retaining 0b (0.0%)




7. Duplicate Strings:  overhead 13.6% 

  Total strings   Unique strings   Duplicate values  Overhead 
 837,830 36,207 16,558 115,322K (13.6%)

Top duplicate strings

  Overhead   # char[]s   # objects  Value 
 6,874K (0.8%) 67,689 67,689 "vc0501.halxg.cloudera.com:8020"
 6,347K (0.8%) 67,705 67,705 "vc0501.halxg.cloudera.com"
 3,172K (0.4%) 67,690 67,690 "hdfs"
 2,435K (0.3%) 7 7 "hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450816,hdfs://vc0 ...[length 207821]"
 725K (< 0.1%) 4,424 4,424 "org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
 334K (< 0.1%) 4,754 4,754 "programmatically"
 191K (< 0.1%) 2,724 2,724 "hdfs-default.xml"
 172K (< 0.1%) 37 37 "{"type":"struct","fields":[{"name":"ss_sold_time_sk","type":"long","nullable":true,"metadata":{"HIVE_TYPE_STRING":"bigint"}} ...[length 2425]"
 149K (< 0.1%) 2,129 2,129 "yarn-default.xml"
 128K (< 0.1%) 2,351 2,351 "false"
 109K (< 0.1%) 1,555 1,555 "core-default.xml"
 104K (< 0.1%) 581 581 "file:/run/cloudera-scm-agent/process/61-hive-HIVESERVER2/hive-site.xml"
 102K (< 0.1%) 1,309 1,309 "hbase-default.xml"
 87K (< 0.1%) 74 74 "AQEAamF2YS51dGlsLkFycmF5TGlz9AECAQFvcmcuYXBhY2hlLmhhZG9vcC5oaXZlLnFsLnBsYW4uRXhwck5vZGVHZW5lcmljRnVuY0Rlc+MBAQABAQECb3JnLmFw ...[length 596]"
 78K (< 0.1%) 1,685 1,685 "true"
 76K (< 0.1%) 981 981 "mapred-default.xml"
 75K (< 0.1%) 744 744 "DECIMAL_STRINGIFIER(scale: 2)"
 70K (< 0.1%) 31 31 "message schema {\n optional int64 ss_sold_time_sk;\n optional int64 ss_item_sk;\n optional int64 ss_customer_sk;\n optional ...[length 1173]"
 65K (< 0.1%) 37 37 "Reducer 4 Explain Plan:\n\n Reduce Operator Tree:\n Select Operator\n expressions: KEY.reducesinkkey0 (type: int), KEY. ...[length 906]"
 59K (< 0.1%) 74 74 "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 396]"
 49K (< 0.1%) 74 74 "ss_sold_time_sk,ss_item_sk,ss_customer_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_qua ...[length 326]"
 41K (< 0.1%) 37 37 "struct store_sales { i64 ss_sold_time_sk, i64 ss_item_sk, i64 ss_customer_sk, i64 ss_cdemo_sk, i64 ss_hdemo_sk, i64 ss_addr_ ...[length 565]"
 40K (< 0.1%) 7 7 "hive\.auto\..*|hive\.cbo\..*|hive\.convert\..*|hive\.exec\.dynamic\.partition.*|hive\.exec\..*\.dynamic\.partitions\..*|hive ...[length 3427]"
 37K (< 0.1%) 31 31 "H4sIAAAAAAAAAFvzloG1uIjBKL8oXS+xIDE5I1WvILGosDS1RC8tM6cktchIr6AoNSUzObEkVc+/ILUosSS/qFjFL7/kXvJrTy0P44tMDEw+DJxwRSUMZj5Aw/Qh ...[length 624]"



Reference Chains for Duplicate Strings

Expensive data fields

24,217K (2.9%), 130892 / 99% dup strings (1829 unique), 130891 dup backing arrays:

  Num strings  String value 
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451394"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451249"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451993"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452233"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452497"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450931"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451070"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451281"
 74"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452283"
       ... and 129806 more strings, of which 1819 are unique

 ↖java.net.URI.path
20,793K (2.5%), 131220 / 99% dup strings (3647 unique), 131220 dup backing arrays:

  Num strings  String value 
 36"145208265"
 36"42387971"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452497"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452093"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451457"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450892"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451401"
 36"42315735"
 36"42262965"
 36"96137537"
       ... and 130572 more strings, of which 3637 are unique

 ↖j.u.Hashtable$Entry.value
17,710K (2.1%), 67683 / 99% dup strings (1971 unique), 67683 dup backing arrays:

  Num strings  String value 
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452497"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452093"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451457"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452422"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451195"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451967"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450892"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451401"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451249"
 37"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452361"
       ... and 67303 more strings, of which 1961 are unique

 ↖java.net.URI.string
17,078K (2.0%), 70998 / 99% dup strings (3392 unique), 70998 dup backing arrays:

  Num strings  String value 
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452076"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452203"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452384"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451814"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451468"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451263"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450836"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451156"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451020"
 37"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452171"
       ... and 70618 more strings, of which 3382 are unique

 ↖java.net.URI.schemeSpecificPart
6,874K (0.8%), 67688 / 100% dup strings (1 unique), 67688 dup backing arrays:

  Num strings  String value 
 67688"vc0501.halxg.cloudera.com:8020"

 ↖java.net.URI.authority
6,345K (0.8%), 67688 / 100% dup strings (1 unique), 67688 dup backing arrays:

  Num strings  String value 
 67688"vc0501.halxg.cloudera.com"

 ↖java.net.URI.host
5,639K (0.7%), 67451 / 100% dup strings (1823 unique), 67451 dup backing arrays:

  Num strings  String value 
 37"ss_sold_date_sk=2450958"
 37"ss_sold_date_sk=2452223"
 37"ss_sold_date_sk=2451892"
 37"ss_sold_date_sk=2452221"
 37"ss_sold_date_sk=2450963"
 37"ss_sold_date_sk=2451944"
 37"ss_sold_date_sk=2451843"
 37"ss_sold_date_sk=2451882"
 37"ss_sold_date_sk=2451557"
 37"ss_sold_date_sk=2451016"
       ... and 66711 more strings, of which 1813 are unique

org.apache.hadoop.hive.ql.plan.PartitionDesc.baseFileName {j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,589K (0.4%), 67451 / 100% dup strings (1823 unique), 67451 dup backing arrays:

  Num strings  String value 
 37"2452628"
 37"2452232"
 37"2452465"
 37"2452454"
 37"2450864"
 37"2451604"
 37"2452225"
 37"2452217"
 37"2451351"
 37"2452000"
       ... and 66711 more strings, of which 1813 are unique

{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
3,172K (0.4%), 67691 / 100% dup strings (2 unique), 67691 dup backing arrays:

  Num strings  String value 
 67688"hdfs"
 3"file"

 ↖java.net.URI.scheme
2,289K (0.3%), 7149 / 99% dup strings (685 unique), 7138 dup backing arrays:

  Num strings  String value 
 987"false"
 724"true"
 229"0"
 190"10"
 189"1000"
 139"1"
 115"10000"
 99"100"
 98"-1"
 82"3"
       ... and 4287 more strings, of which 675 are unique

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.properties
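
Nearly every chain above ends in a java.net.URI field reached through org.apache.hadoop.fs.Path: each of the ~1,823 partition directories is re-parsed into a fresh Path (and thus a fresh URI with its own path, host, authority and scheme strings) once per task thread. A hedged sketch of canonicalizing Path objects so each partition URI is materialized once; the cache and its call sites are assumptions, not an existing Hadoop API:

// Sketch: one Path (and one underlying URI) per distinct partition URI.
// Assumes hadoop-common on the classpath; where this cache would be
// consulted (e.g. while deserializing MapWork) is an assumption.
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.fs.Path;

public class PathCache {
    private static final ConcurrentHashMap<String, Path> CACHE = new ConcurrentHashMap<>();

    static Path canonical(String uri) {
        return CACHE.computeIfAbsent(uri, Path::new);
    }
}

Path is immutable for practical purposes, so sharing instances is safe; in a long-lived process a bounded or weak-valued map would be the more careful choice.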


Full reference chains

17,213K (2.0%), 65609 / 100% dup strings (1823 unique), 65609 dup backing arrays:

  Num strings  String value 
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452497"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452422"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450892"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452093"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451967"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451195"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451457"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451401"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451249"
 36"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452361"
       ... and 64908 more strings, of which 1813 are unique

j.u.Hashtable$Entry.value j.u.Hashtable$Entry.{next}
j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
9,899K (1.2%), 53364 / 100% dup strings (1823 unique), 53364 dup backing arrays:

  Num strings  String value 
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451364"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451755"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451225"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450964"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451394"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451249"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451315"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451993"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452233"
 31"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452422"
       ... and 52805 more strings, of which 1813 are unique

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
↖Java Local(org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher) [@d1c1b630,@d1c2af88,@d1c2b228,@d1c3ea88] ... and 27 more GC roots (31 thread(s))

9,408K (1.1%), 35950 / 100% dup strings (1823 unique), 35950 dup backing arrays:

  Num strings  String value 
 27"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450878"
 27"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450852"
 27"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450897"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452412"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451459"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450929"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450891"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452627"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450947"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451285"
       ... and 35572 more strings, of which 1813 are unique

java.net.URI.string org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
8,744K (1.0%), 35950 / 100% dup strings (1823 unique), 35950 dup backing arrays:

  Num strings  String value 
 27"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450878"
 27"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450852"
 27"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450897"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451112"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451262"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451522"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451850"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450849"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451285"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451464"
       ... and 35572 more strings, of which 1813 are unique

java.net.URI.schemeSpecificPart org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
8,244K (1.0%), 31501 / 100% dup strings (1823 unique), 31501 dup backing arrays:

  Num strings  String value 
 27"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451678"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451835"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452099"
 26"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451819"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452247"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452241"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452233"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452034"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452446"
 25"hdfs://vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451830"
       ... and 31139 more strings, of which 1813 are unique

java.net.URI.string org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
7,662K (0.9%), 31501 / 100% dup strings (1823 unique), 31501 dup backing arrays:

  Num strings  String value 
 27"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451678"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452099"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451835"
 26"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451819"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452034"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452446"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452247"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451830"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452233"
 25"//vc0501.halxg.cloudera.com:8020/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452241"
       ... and 31139 more strings, of which 1813 are unique

java.net.URI.schemeSpecificPart org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
6,588K (0.8%), 35698 / 100% dup strings (1823 unique), 35698 dup backing arrays:

  Num strings  String value 
 27"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450897"
 27"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450878"
 27"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450852"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450891"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451356"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450929"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452627"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451650"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450947"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451112"
       ... and 35323 more strings, of which 1813 are unique

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
5,772K (0.7%), 31278 / 100% dup strings (1823 unique), 31278 dup backing arrays:

  Num strings  String value 
 27"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451678"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451835"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452099"
 26"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451819"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452247"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452233"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452241"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451830"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452034"
 25"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452446"
       ... and 30916 more strings, of which 1813 are unique

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
5,639K (0.7%), 67451 / 100% dup strings (1823 unique), 67451 dup backing arrays:

  Num strings  String value 
 37"ss_sold_date_sk=2450958"
 37"ss_sold_date_sk=2452223"
 37"ss_sold_date_sk=2451892"
 37"ss_sold_date_sk=2452221"
 37"ss_sold_date_sk=2450963"
 37"ss_sold_date_sk=2451944"
 37"ss_sold_date_sk=2451843"
 37"ss_sold_date_sk=2451882"
 37"ss_sold_date_sk=2451557"
 37"ss_sold_date_sk=2451016"
       ... and 66711 more strings, of which 1813 are unique

org.apache.hadoop.hive.ql.plan.PartitionDesc.baseFileName {j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,651K (0.4%), 35950 / 100% dup strings (1 unique), 35950 dup backing arrays:

  Num strings  String value 
 35950"vc0501.halxg.cloudera.com:8020"

java.net.URI.authority org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,588K (0.4%), 67434 / 100% dup strings (1823 unique), 67434 dup backing arrays:

  Num strings  String value 
 37"2452628"
 37"2452232"
 37"2452465"
 37"2452454"
 37"2450864"
 37"2451604"
 37"2452225"
 37"2452217"
 37"2451351"
 37"2452000"
       ... and 66711 more strings, of which 1813 are unique

{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,572K (0.4%), 65561 / 100% dup strings (1822 unique), 65561 dup backing arrays:

  Num strings  String value 
 36"145208265"
 36"42474522"
 36"42387971"
 36"96188421"
 36"42520947"
 36"42315735"
 36"96399033"
 36"42262965"
 36"96137537"
 36"42699198"
       ... and 64872 more strings, of which 1812 are unique

j.u.Hashtable$Entry.value j.u.Hashtable$Entry[]
org.apache.hadoop.hive.common.CopyOnFirstWriteProperties.table
org.apache.hadoop.hive.ql.plan.PartitionDesc.properties
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,370K (0.4%), 35950 / 100% dup strings (1 unique), 35950 dup backing arrays:

  Num strings  String value 
 35950"vc0501.halxg.cloudera.com"

java.net.URI.host org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
3,199K (0.4%), 31501 / 100% dup strings (1 unique), 31501 dup backing arrays:

  Num strings  String value 
 31501"vc0501.halxg.cloudera.com:8020"

java.net.URI.authority org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
2,953K (0.3%), 31501 / 100% dup strings (1 unique), 31501 dup backing arrays:

  Num strings  String value 
 31501"vc0501.halxg.cloudera.com"

java.net.URI.host org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
2,107K (0.2%), 5824 / 99% dup strings (661 unique), 5816 dup backing arrays:

  Num strings  String value 
 839"false"
 601"true"
 192"0"
 161"10"
 153"1000"
 117"1"
 98"10000"
 81"100"
 80"-1"
 70"3"
       ... and 3422 more strings, of which 651 are unique

{j.u.Properties}.values org.apache.hadoop.mapred.JobConf.properties
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,932K (0.2%), 10471 / 100% dup strings (1823 unique), 10471 dup backing arrays:

  Num strings  String value 
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451006"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451364"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2450976"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452056"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452624"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452247"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451230"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451591"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2451327"
 6"/user/systest/tpcds_1000_decimal_parquet/store_sales/ss_sold_date_sk=2452181"
       ... and 10374 more strings, of which 1813 are unique

java.net.URI.path org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher.pathToPartitionInfo
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.projectionPusher
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.recordReader
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.curReader
org.apache.spark.rdd.HadoopRDD$$anon$1.reader
org.apache.spark.rdd.HadoopRDD$$anon$1$$anonfun$2.$outer
org.apache.spark.TaskContext$$anon$1.f$1
{scala.collection.mutable.ArrayBuffer}
org.apache.spark.TaskContextImpl.onCompleteCallbacks
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,685K (0.2%), 35950 / 100% dup strings (1 unique), 35950 dup backing arrays:

  Num strings  String value 
 35950"hdfs"

java.net.URI.scheme org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,588K (0.2%), 16536 / 100% dup strings (14 unique), 16536 dup backing arrays:

  Num strings  String value 
 4074"programmatically"
 3792"org.apache.hadoop.hive.conf.LoopingByteArrayInputStream@207ea13"
 2334"hdfs-default.xml"
 1824"yarn-default.xml"
 1332"core-default.xml"
 1122"hbase-default.xml"
 840"mapred-default.xml"
 498"file:/run/cloudera-scm-agent/process/61-hive-HIVESERVER2/hive-site.xml"
 270"mapred-site.xml"
 192"core-site.xml"
       ... and 0 more strings, of which 4 are unique

String[] {java.util.concurrent.ConcurrentHashMap}.values
org.apache.hadoop.mapred.JobConf.updatingResource
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,476K (0.2%), 31501 / 100% dup strings (1 unique), 31501 dup backing arrays:

  Num strings  String value 
 31501"hdfs"

java.net.URI.scheme org.apache.hadoop.fs.Path.uri
{j.u.LinkedHashMap}.keys
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService





8. Bad Collections:  overhead 3.3% 

  Total collections   Bad collections   Overhead 
  216,565             212,633           28,124K (3.3%)

Top bad collections:

  Overhead   Problem   # objects   Type 
 11,626K (1.4%)   1-elem   67648 / 99%   j.u.LinkedHashMap
 7,395K (0.9%)    sparse   67491 / 96%   j.u.Properties
 5,375K (0.6%)    1-elem   68884 / 96%   j.u.ArrayList
 1,984K (0.2%)    sparse   117 / 29%     java.util.concurrent.ConcurrentHashMap
 985K (0.1%)      sparse   116 / 0%      j.u.LinkedHashMap
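
The two dominant shapes are 67,648 LinkedHashMaps holding exactly one entry each (PartitionDesc.partSpec, per the chains below) and 67,491 sparse Properties, each dragging a mostly-empty bucket array. A sketch of the two standard remedies; applying them inside Hive's plan classes is an assumption, not something the report prescribes:

// Sketch: avoiding per-collection overhead for tiny or known-size maps.
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class SmallMaps {
    public static void main(String[] args) {
        // 1-elem case: an immutable singleton map allocates no table
        // and no Entry objects.
        Map<String, String> partSpec =
                Collections.singletonMap("ss_sold_date_sk", "2452628");

        // Sparse case: size the table for the known element count instead
        // of the default capacity (Hashtable/Properties default to 11 buckets).
        Map<String, String> sized = new LinkedHashMap<>(2, 1.0f);
        sized.put("ss_sold_date_sk", "2452628");

        System.out.println(partSpec + " " + sized);
    }
}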


Reference Chains for Bad Collections

Expensive data fields

11,599K (1.4%): j.u.LinkedHashMap: 67488 / 100% of 1-elem 11,599K (1.4%)
 ↖org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec
7,475K (0.9%): j.u.Properties: 67488 / 99% of sparse 7,381K (0.9%), 482 / 0% of small 94K (< 0.1%)
 ↖org.apache.hadoop.hive.ql.plan.TableDesc.properties
5,269K (0.6%): j.u.ArrayList: 67451 / 100% of 1-elem 5,269K (0.6%)
{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,892K (0.2%): java.util.concurrent.ConcurrentHashMap: 80 / 100% of sparse 1,892K (0.2%)
 ↖org.apache.hadoop.mapred.JobConf.updatingResource


Full reference chains

11,593K (1.4%): j.u.LinkedHashMap: 67451 / 100% of 1-elem 11,593K (1.4%)
org.apache.hadoop.hive.ql.plan.PartitionDesc.partSpec {j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
7,377K (0.9%): j.u.Properties: 67451 / 100% of sparse 7,377K (0.9%)
org.apache.hadoop.hive.ql.plan.TableDesc.properties org.apache.hadoop.hive.ql.plan.PartitionDesc.tableDesc
{j.u.LinkedHashMap}.values
org.apache.hadoop.hive.ql.plan.MapWork.pathToPartitionInfo
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
5,269K (0.6%): j.u.ArrayList: 67451 / 100% of 1-elem 5,269K (0.6%)
{j.u.LinkedHashMap}.values org.apache.hadoop.hive.ql.plan.MapWork.pathToAliases
{j.u.HashMap}.values
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
1,442K (0.2%): java.util.concurrent.ConcurrentHashMap: 61 / 100% of sparse 1,442K (0.2%)
org.apache.hadoop.mapred.JobConf.updatingResource ↖Java Local(org.apache.hadoop.mapred.JobConf) [@c56bfa00,@c8cc83d0,@c8e31e08,@c8ef2768] ... and 58 more GC roots (31 thread(s))






9. Bad object arrays:  overhead 0.3% 

  Total object arrays   Bad object arrays   Overhead 
  308,383               20,253              2,431K (0.3%)

Top bad object arrays:

  Overhead   Problem   # objects    Type 
 1,184K (0.1%)   empty   37 / 100%   org.apache.spark.unsafe.memory.MemoryBlock[]
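
Each of the 37 task threads owns a TaskMemoryManager whose page table is allocated at full size up front, so even an idle table costs about 32K (1,184K / 37). A hedged illustration of lazy allocation; the 8192-slot size mirrors what Spark appears to use, but this is a sketch, not Spark code:

// Sketch: allocating a fixed-size page table only when the first page
// is registered, so idle tasks do not retain an all-null array.
public class LazyPageTable<T> {
    private static final int SIZE = 8192;   // assumed table size
    private Object[] table;                 // null until first use

    @SuppressWarnings("unchecked")
    T get(int i) {
        return table == null ? null : (T) table[i];
    }

    void put(int i, T page) {
        if (table == null) {
            table = new Object[SIZE];
        }
        table[i] = page;
    }
}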


Reference Chains for Bad Object Arrays

Expensive data fields

1,184K (0.1%): org.apache.spark.unsafe.memory.MemoryBlock[]: 37 / 100% of empty 1,184K (0.1%)
org.apache.spark.memory.TaskMemoryManager.pageTable org.apache.spark.TaskContextImpl.taskMemoryManager
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService


Full reference chains

1,184K (0.1%): org.apache.spark.unsafe.memory.MemoryBlock[]: 37 / 100% of empty 1,184K (0.1%)
org.apache.spark.memory.TaskMemoryManager.pageTable org.apache.spark.TaskContextImpl.taskMemoryManager
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
210K (< 0.1%): String[]: 8970 / 70% of 1-length 210K (< 0.1%)
{java.util.concurrent.ConcurrentHashMap}.values org.apache.hadoop.mapred.JobConf.updatingResource
org.apache.hadoop.hive.ql.exec.MapredContext.jobConf
j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService
148K (< 0.1%): java.nio.ByteBuffer[]: 37 / 100% of 1-elem 148K (< 0.1%)
sun.nio.ch.Util$BufferCache.buffers j.l.ThreadLocal$ThreadLocalMap$Entry.value
j.l.ThreadLocal$ThreadLocalMap$Entry[]
j.l.ThreadLocal$ThreadLocalMap.table
org.apache.spark.util.UninterruptibleThread.threadLocals
j.l.Thread[]
j.l.ThreadGroup.threads
io.netty.util.concurrent.DefaultThreadFactory.threadGroup
java.util.concurrent.ThreadPoolExecutor.threadFactory
↖Java Static org.apache.spark.network.shuffle.RetryingBlockFetcher.executorService





10. Bad Primitive Arrays:  overhead 28.9% 

  Total primitive arrays   Bad primitive arrays   Overhead 
  844,406                  5,186                  244,002K (28.9%)

Top bad primitive arrays:

  Overhead   Problem   # objects   Type 
 165,046K (19.5%)   trail-0s   236 / 9%   byte[]
 78,470K (9.3%)     empty      172 / 6%   byte[]
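
"trail-0s" arrays are buffers whose tails were never written: per the chains below, mostly java.nio.HeapByteBuffer backing arrays sized for an entire Parquet row group, plus Netty pool chunks. Where a buffer's final length is known and the buffer will be retained, copying down to that length releases the zero tail. A minimal sketch, assuming a heap buffer with a zero array offset; whether the Parquet read path could adopt this is not something the report shows:

// Sketch: trimming an over-allocated heap buffer to the bytes actually
// written, so a large zero tail is not retained.
import java.nio.ByteBuffer;
import java.util.Arrays;

public class TrimBuffer {
    static ByteBuffer trim(ByteBuffer buf) {
        buf.flip();                                  // limit = bytes written
        if (buf.limit() == buf.capacity()) {
            return buf;                              // fully used, keep as-is
        }
        byte[] exact = Arrays.copyOf(buf.array(), buf.limit());
        return ByteBuffer.wrap(exact);
    }

    public static void main(String[] args) {
        ByteBuffer b = ByteBuffer.allocate(16 * 1024 * 1024);
        b.put(new byte[9_495_226]);                  // only part is written
        System.out.println(trim(b).capacity());      // 9495226, not 16777216
    }
}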


Reference Chains for Bad Primitive Arrays

Expensive data fields

152,317K (18.0%): byte[]: 22 / 68% of trail-0s 82,438K (9.8%), 8 / 25% of empty 69,879K (8.3%), 1 / 3% of 0-length 16b (< 0.1%)
 ↖java.nio.HeapByteBuffer.hb
81,911K (9.7%): byte[]: 5 / 100% of trail-0s 81,911K (9.7%)
 ↖io.netty.buffer.PoolChunk.memory


Full reference chains

144,542K (17.1%): byte[]: 20 / 74% of trail-0s 82,423K (9.8%), 6 / 22% of empty 62,118K (7.4%)
java.nio.HeapByteBuffer.hb ↖Java Local(java.nio.HeapByteBuffer) [@e2c258a0,@e6da5218,@e6dd49f8,@e6ddbef8] ... and 23 more GC roots (27 thread(s))

65,527K (7.8%): byte[]: 4 / 100% of trail-0s 65,527K (7.8%)
io.netty.buffer.PoolChunk.memory io.netty.buffer.PoolChunkList.head
io.netty.buffer.PoolArena$HeapArena.qInit
io.netty.buffer.PoolThreadCache.heapArena
Object[]
io.netty.util.internal.InternalThreadLocalMap.indexedVariables
io.netty.util.concurrent.FastThreadLocalThread.threadLocalMap
io.netty.channel.nio.NioEventLoop.thread
{j.u.IdentityHashMap}
io.netty.resolver.DefaultAddressResolverGroup.resolvers
↖Java Static io.netty.resolver.DefaultAddressResolverGroup.INSTANCE
16,383K (1.9%): byte[]: 1 / 100% of trail-0s 16,383K (1.9%)
io.netty.buffer.PoolChunk.memory io.netty.buffer.PoolChunkList.head
io.netty.buffer.PoolArena$HeapArena.qInit
io.netty.buffer.PoolArena[]
io.netty.buffer.PooledByteBufAllocator.heapArenas
org.apache.spark.network.client.TransportClientFactory.pooledAllocator
org.apache.spark.rpc.netty.NettyRpcEnv.clientFactory
↖Java Local@c0029b50 (org.apache.spark.rpc.netty.NettyRpcEnv)

7,752K (0.9%): byte[]: 1 / 100% of empty 7,752K (0.9%)
java.nio.HeapByteBuffer.hb ↖Java Local@f0bec7c0 (java.nio.HeapByteBuffer)

7,712K (0.9%): byte[]: 2 / 0% of empty 7,712K (0.9%)
↖Unreachable
  All or some of these objects may have started out live as:

7,752K (0.9%): byte[]: 1 / 100% of empty 7,752K (0.9%)
java.nio.HeapByteBuffer.hb ↖Java Local@f0bec7c0 (java.nio.HeapByteBuffer)

640K (< 0.1%): byte[]: 20 / 100% of empty 640K (< 0.1%)
sun.nio.ch.EPollArrayWrapper.eventsLow sun.nio.ch.EPollSelectorImpl.pollWrapper
io.netty.channel.nio.NioEventLoop.unwrappedSelector
io.netty.util.concurrent.EventExecutor[]
io.netty.channel.nio.NioEventLoopGroup.children
io.netty.channel.nio.NioEventLoop.parent
{j.u.IdentityHashMap}
io.netty.resolver.DefaultAddressResolverGroup.resolvers
↖Java Static io.netty.resolver.DefaultAddressResolverGroup.INSTANCE
8K (< 0.1%): byte[]: 1 / 100% of empty 8K (< 0.1%)
java.io.BufferedInputStream.buf ↖Java Static java.lang.System.in
2K (< 0.1%): byte[]: 28 / 100% of empty 2K (< 0.1%)
org.apache.hadoop.hdfs.client.HdfsDataInputStream.bytearr org.apache.parquet.hadoop.util.H2SeekableInputStream.stream
↖Java Local(org.apache.parquet.hadoop.util.H2SeekableInputStream) [@e27512b0,@e2ba8820,@e2c05c88,@e2c060b8] ... and 24 more GC roots (28 thread(s))








11. Boxed Numbers:  no significant overhead 

  Total boxed objects   Overhead 
  1,841                 33K (< 0.1%)


Reference Chains for Boxed Numbers

Full reference chains

4K (< 0.1%): j.l.Long: 253 / 100% objects

 Random sample 
      j.l.Long(-68)
      j.l.Long(36)
      j.l.Long(77)
      j.l.Long(-125)
      j.l.Long(79)
      j.l.Long(100)
      j.l.Long(-62)
      j.l.Long(82)
      j.l.Long(3)
      j.l.Long(-99)
      j.l.Long(85)
      j.l.Long(-97)
      j.l.Long(-96)
      j.l.Long(-115)
      j.l.Long(109)
      j.l.Long(-73)
      j.l.Long(-12)
      j.l.Long(112)
      j.l.Long(33)
      j.l.Long(-89)

j.l.Long[] ↖Java Static java.lang.Long$LongCache.cache
4K (< 0.1%): j.l.Byte: 253 / 100% objects

 Random sample 
      j.l.Byte(-68)
      j.l.Byte(36)
      j.l.Byte(77)
      j.l.Byte(-125)
      j.l.Byte(79)
      j.l.Byte(100)
      j.l.Byte(-62)
      j.l.Byte(82)
      j.l.Byte(3)
      j.l.Byte(-99)
      j.l.Byte(85)
      j.l.Byte(-97)
      j.l.Byte(-96)
      j.l.Byte(-115)
      j.l.Byte(109)
      j.l.Byte(-73)
      j.l.Byte(-12)
      j.l.Byte(112)
      j.l.Byte(33)
      j.l.Byte(-89)

j.l.Byte[] ↖Java Static java.lang.Byte$ByteCache.cache
4K (< 0.1%): j.l.Short: 253 / 100% objects

 Random sample 
      j.l.Short(-68)
      j.l.Short(36)
      j.l.Short(77)
      j.l.Short(-125)
      j.l.Short(79)
      j.l.Short(100)
      j.l.Short(-62)
      j.l.Short(82)
      j.l.Short(3)
      j.l.Short(-99)
      j.l.Short(85)
      j.l.Short(-97)
      j.l.Short(-96)
      j.l.Short(-115)
      j.l.Short(109)
      j.l.Short(-73)
      j.l.Short(-12)
      j.l.Short(112)
      j.l.Short(33)
      j.l.Short(-89)

j.l.Short[] ↖Java Static java.lang.Short$ShortCache.cache
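
All three chains terminate in the JDK's own autobox caches (Long$LongCache, Byte$ByteCache, Short$ShortCache), which eagerly hold the 256 values from -128 to 127; the ~4K per type is a fixed JVM cost, not an application leak. A quick demonstration of the cache boundary:

// The valueOf caches cover -128..127 only; values outside that range
// are boxed fresh on every call.
public class BoxCache {
    public static void main(String[] args) {
        System.out.println(Long.valueOf(100) == Long.valueOf(100));    // true: cached
        System.out.println(Long.valueOf(1000) == Long.valueOf(1000));  // false: fresh boxes
    }
}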





12. Duplicate Objects:  no significant overhead 

Reference Chains for Duplicate Objects




13. Duplicate Primitive Arrays:  overhead 2.1% 

  Total arrays   Unique arrays   Duplicate values   Overhead 
  7,126          1,622           225                17,634K (2.1%)

Types of duplicate objects:

  Overhead   # objects   Unique objects   Class name 
 17,505K (2.1%)   2,618   754   byte[]
 67K (< 0.1%)     3,890   671   int[]
 41K (< 0.1%)     246     95    long[]
 12K (< 0.1%)     200     52    char[]
 5K (< 0.1%)      122     24    boolean[]
 1K (< 0.1%)      36      18    short[]
 232b (< 0.1%)    9       6     double[]
 96b (< 0.1%)     5       2     float[]
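
Almost all of the 2.1% is one 456K serialized-plan buffer (the byte[467639] under HiveMapFunction.buffer below) copied once per task thread. Arrays compare by identity, so deduplicating them needs an equality wrapper; a hedged sketch (sharing one copy across the 37 threads is only safe if the buffer is treated as read-only, which the report cannot confirm):

// Sketch: canonicalizing identical byte[] contents behind a wrapper
// with value equality.
import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;

public class ByteArrayPool {
    private static final class Key {
        final byte[] bytes;
        Key(byte[] b) { this.bytes = b; }
        @Override public boolean equals(Object o) {
            return o instanceof Key && Arrays.equals(bytes, ((Key) o).bytes);
        }
        @Override public int hashCode() { return Arrays.hashCode(bytes); }
    }

    private static final ConcurrentHashMap<Key, byte[]> POOL = new ConcurrentHashMap<>();

    static byte[] canonical(byte[] b) {
        return POOL.computeIfAbsent(new Key(b), k -> k.bytes);
    }
}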


Top duplicate arrays

  Overhead   Num arrays   Value 
 16,441K (1.9%)   37     byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}
 832K (< 0.1%)    27     byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 107K (< 0.1%)    37     byte[3028]{-84, -19, 0, 5, 's', 'r', 0, '%', 'o', 'r', 'g', '.', 'a', 'p', 'a', 'c', 'h', 'e', '.', 's', 'p', 'a', 'r', 'k', '.', 'e', 'x', 'e', 'c', 'u', ...}
 45K (< 0.1%)     2,894  int[0]{}
 36K (< 0.1%)     37     long[128]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 36K (< 0.1%)     10     byte[4096]{0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, ...}
 20K (< 0.1%)     6      byte[4096]{0, 1, 2, 1, 3, 2, 2, 2, 4, 3, 3, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, ...}
 8K (< 0.1%)      358    byte[1]{'?'}
 8K (< 0.1%)      2      byte[8192]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 7K (< 0.1%)      29     int[64]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 6K (< 0.1%)      37     char[80]{(non-printable characters), ...}
 5K (< 0.1%)      328    byte[0]{}
 4K (< 0.1%)      76     char[19]{(non-printable characters)}
 4K (< 0.1%)      2      byte[4096]{0, 1, 2, 1, 3, 2, 2, 2, 4, 3, 3, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, ...}
 3K (< 0.1%)      69     byte[36]{'b', '7', '9', '0', '0', '4', '3', 'd', '-', '1', '4', 'f', '5', '-', '4', 'b', '2', '0', '-', 'b', 'd', 'f', 'a', '-', 'a', '2', '8', '8', '7', 'd', ...}
 3K (< 0.1%)      37     byte[80]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 3K (< 0.1%)      37     byte[73]{'o', 'r', 'g', '.', 'a', 'p', 'a', 'c', 'h', 'e', '.', 'h', 'a', 'd', 'o', 'o', 'p', '.', 'h', 'i', 'v', 'e', '.', 'q', 'l', '.', 'i', 'o', '.', 'C', ...}
 3K (< 0.1%)      69     byte[25]{'v', 'c', '0', '5', '1', '3', '.', 'h', 'a', 'l', 'x', 'g', '.', 'c', 'l', 'o', 'u', 'd', 'e', 'r', 'a', '.', 'c', 'o', 'm'}
 2K (< 0.1%)      69     byte[12]{'1', '0', '.', '1', '7', '.', '2', '1', '1', '.', '2', '3'}
 2K (< 0.1%)      2      int[512]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}
 1K (< 0.1%)      81     byte[8]{0, 0, 0, 0, 0, 0, 0, 0}



Reference Chains for Duplicate Primitive Arrays

Expensive data fields

16,441K (1.9%): byte[]: 37 / 100% dup arrays (1 unique)

  Num objects  Object value 
 37byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

 ↖org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer


Full reference chains

13,774K (1.6%): byte[]: 31 / 100% dup arrays (1 unique)

  Num objects  Object value 
 31byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.f$9
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.cleanedF$1
org.apache.spark.rdd.MapPartitionsRDD.f
↖Java Local(org.apache.spark.rdd.MapPartitionsRDD) [@c571a210,@c8cc8560,@c8e31f18,@c8ef2878] ... and 33 more GC roots (37 thread(s))

2,666K (0.3%): byte[]: 6 / 100% dup arrays (1 unique)

  Num objects  Object value 
 6byte[467639]{-114, 8, 'L', '!', 'm', 'a', 'p', 'r', 'e', 'd', 'u', 'c', 'e', '.', 'j', 'o', 'b', 'h', 'i', 's', 't', 'o', 'r', 'y', '.', 'j', 'h', 'i', 's', 't', ...}

org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.buffer ↖Java Local(org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction) [@c9f25298,@cce80178,@cd208d10,@cd27c2c0] ... and 2 more GC roots (6 thread(s))

616K (< 0.1%): byte[]: 20 / 100% dup arrays (1 unique)

  Num objects  Object value 
 20byte[32768]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...}

sun.nio.ch.EPollArrayWrapper.eventsLow sun.nio.ch.EPollSelectorImpl.pollWrapper
io.netty.channel.nio.NioEventLoop.unwrappedSelector
io.netty.util.concurrent.EventExecutor[]
io.netty.channel.nio.NioEventLoopGroup.children
io.netty.channel.nio.NioEventLoop.parent
{j.u.IdentityHashMap}
io.netty.resolver.DefaultAddressResolverGroup.resolvers
↖Java Static io.netty.resolver.DefaultAddressResolverGroup.INSTANCE





14. Heap Size Configuration:  no significant overhead 
Maximum heap size (-Xmx) is configured properly for your working set.



15. Very Long (Over 1000 Elements) Reference Chains:  not found 



16. Thread stacks (total threads: 95)
Thread name: "Executor task launch worker for task 276", daemon: true
java.lang.OutOfMemoryError.<init>(OutOfMemoryError.java:48)
java.nio.HeapByteBuffer.<init>(HeapByteBuffer.java:57)
java.nio.ByteBuffer.allocate(ByteBuffer.java:335)
org.apache.parquet.bytes.HeapByteBufferAllocator.allocate(HeapByteBufferAllocator.java:32)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1092)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


8 threads, first thread name: "Executor task launch worker for task 279", daemon: true

  Thread ID   Name  Daemon 
 33 "Executor task launch worker for task 279"true
 37 "Executor task launch worker for task 275"true
 15 "Executor task launch worker for task 291"true
 40 "Executor task launch worker for task 272"true
 45 "Executor task launch worker for task 267"true
 42 "Executor task launch worker for task 270"true
 30 "Executor task launch worker for task 282"true
 14 "Executor task launch worker for task 303"true


sun.nio.ch.FileDispatcherImpl.read0(Native method)
sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39)
sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
sun.nio.ch.IOUtil.read(IOUtil.java:192)
sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:380)
org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57)
org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.readChannelFully(PacketReceiver.java:256)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:207)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:169)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:102)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.readNextPacket(BlockReaderRemote.java:187)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.read(BlockReaderRemote.java:168)
org.apache.hadoop.hdfs.ByteBufferStrategy.readFromBlock(ReaderStrategy.java:189)
org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:706)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:767)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)
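
The stack above is the hot path behind the large byte[] footprint reported in Section 2: each ParquetFileReader.readNextRowGroup() call leads into ConsecutiveChunkList.readAll(), which allocates heap ByteBuffers sized to the row group's column chunks and fills them from HDFS. Below is a minimal sketch of that read loop, assuming a parquet-hadoop 1.x classpath; the class name and the input path taken from args[0] are illustrative, not taken from this dump.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.column.page.PageReadStore;
    import org.apache.parquet.hadoop.ParquetFileReader;

    public class RowGroupScan {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Each readNextRowGroup() call reaches ConsecutiveChunkList.readAll(),
            // which allocates one heap ByteBuffer per run of consecutive column
            // chunks -- the HeapByteBuffer.hb byte[] arrays seen in this dump.
            try (ParquetFileReader reader = ParquetFileReader.open(conf, new Path(args[0]))) {
                PageReadStore rowGroup;
                while ((rowGroup = reader.readNextRowGroup()) != null) {
                    System.out.println("row group: " + rowGroup.getRowCount() + " rows");
                }
            }
        }
    }

With many worker threads parked mid-read like this, each thread keeps its current row group's buffers reachable as Java Locals, consistent with the per-thread retention shown in Section 3.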


2 threads, first thread name: "netty-rpc-env-timeout", daemon: true

  Thread ID   Name  Daemon 
 50 "netty-rpc-env-timeout"true
 46 "driver-heartbeater"true


sun.misc.Unsafe.park(Native method)
java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093)
java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)
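
These two threads are idle rather than leaking: a ScheduledThreadPoolExecutor worker with nothing due parks inside DelayedWorkQueue.take(), producing exactly the Unsafe.park / awaitNanos frames above. A self-contained reproduction follows (JDK only; the pool and its "pool-" thread-name prefix come from the default thread factory, not from this dump):

    import java.util.Map;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class IdleSchedulerStack {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService timer = Executors.newScheduledThreadPool(1);
            timer.schedule(() -> System.out.println("tick"), 1, TimeUnit.HOURS);
            Thread.sleep(500); // let the worker park in DelayedWorkQueue.take()
            for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
                if (e.getKey().getName().startsWith("pool-")) {
                    // Prints the same park/awaitNanos/take frames as the dump above.
                    for (StackTraceElement frame : e.getValue()) {
                        System.out.println("  " + frame);
                    }
                }
            }
            timer.shutdownNow();
        }
    }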


5 threads, first thread name: "rpc-client-3-1", daemon: true

  Thread ID   Name  Daemon 
 49 "rpc-client-3-1"true
 16 "shuffle-client-4-2"true
 17 "shuffle-client-4-1"true
 47 "shuffle-client-6-1"true
 48 "shuffle-server-5-1"true


sun.nio.ch.EPollArrayWrapper.epollWait(Native method)
sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:753)
io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:409)
io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)
io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:138)
java.lang.Thread.run(Thread.java:748)


40 threads, first thread name: "dispatcher-event-loop-24", daemon: true

  Thread ID   Name  Daemon 
 67 "dispatcher-event-loop-24"true
 65 "dispatcher-event-loop-26"true
 52 "dispatcher-event-loop-39"true
 70 "dispatcher-event-loop-21"true
 78 "dispatcher-event-loop-13"true
 54 "dispatcher-event-loop-37"true
 62 "dispatcher-event-loop-29"true
 64 "dispatcher-event-loop-27"true
 68 "dispatcher-event-loop-23"true
 76 "dispatcher-event-loop-15"true
 72 "dispatcher-event-loop-19"true
 82 "dispatcher-event-loop-9"true
 90 "dispatcher-event-loop-1"true
 88 "dispatcher-event-loop-3"true
 84 "dispatcher-event-loop-7"true
 80 "dispatcher-event-loop-11"true
 86 "dispatcher-event-loop-5"true
 91 "dispatcher-event-loop-0"true
 58 "dispatcher-event-loop-33"true
 75 "dispatcher-event-loop-16"true
 74 "dispatcher-event-loop-17"true
 57 "dispatcher-event-loop-34"true
 59 "dispatcher-event-loop-32"true
 60 "dispatcher-event-loop-31"true
 56 "dispatcher-event-loop-35"true
 55 "dispatcher-event-loop-36"true
 71 "dispatcher-event-loop-20"true
 53 "dispatcher-event-loop-38"true
 69 "dispatcher-event-loop-22"true
 61 "dispatcher-event-loop-30"true
 63 "dispatcher-event-loop-28"true
 89 "dispatcher-event-loop-2"true
 73 "dispatcher-event-loop-18"true
 77 "dispatcher-event-loop-14"true
 79 "dispatcher-event-loop-12"true
 83 "dispatcher-event-loop-8"true
 85 "dispatcher-event-loop-6"true
 87 "dispatcher-event-loop-4"true
 81 "dispatcher-event-loop-10"true
 66 "dispatcher-event-loop-25"true


sun.misc.Unsafe.park(Native method)
java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:210)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "org.apache.hadoop.hdfs.PeerCache@704ec603", daemon: true
java.lang.Thread.sleep(Native method)
org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
java.lang.Thread.run(Thread.java:748)


8 threads, first thread name: "Executor task launch worker for task 277", daemon: true

  Thread ID   Name  Daemon 
 35 "Executor task launch worker for task 277"true
 31 "Executor task launch worker for task 281"true
 26 "Executor task launch worker for task 286"true
 39 "Executor task launch worker for task 273"true
 38 "Executor task launch worker for task 274"true
 4 "Executor task launch worker for task 423"true
 2 "Executor task launch worker for task 447"true
 6 "Executor task launch worker for task 399"true


sun.nio.ch.NativeThread.current(Native method)
sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:326)
org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57)
org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.readChannelFully(PacketReceiver.java:256)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:207)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:102)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.readNextPacket(BlockReaderRemote.java:187)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.read(BlockReaderRemote.java:168)
org.apache.hadoop.hdfs.ByteBufferStrategy.readFromBlock(ReaderStrategy.java:189)
org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:706)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:767)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "netty-rpc-connection-0", daemon: true
sun.misc.Unsafe.park(Native method)
java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1073)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 280", daemon: true
sun.nio.ch.EPollArrayWrapper.epollWait(Native method)
sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:157)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118)
java.io.FilterInputStream.read(FilterInputStream.java:83)
org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:493)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.newBlockReader(BlockReaderRemote.java:414)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReader(BlockReaderFactory.java:856)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReaderFromTcp(BlockReaderFactory.java:752)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.build(BlockReaderFactory.java:386)
org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:645)
org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:575)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:758)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


5 threads, first thread name: "Executor task launch worker for task 278", daemon: true

  Thread ID   Name  Daemon 
 34 "Executor task launch worker for task 278"true
 29 "Executor task launch worker for task 283"true
 25 "Executor task launch worker for task 287"true
 43 "Executor task launch worker for task 269"true
 10 "Executor task launch worker for task 351"true


org.apache.hadoop.util.NativeCrc32.nativeComputeChunkedSums(Native method)
org.apache.hadoop.util.NativeCrc32.verifyChunkedSums(NativeCrc32.java:63)
org.apache.hadoop.util.DataChecksum.verifyChunkedSums(DataChecksum.java:316)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.readNextPacket(BlockReaderRemote.java:216)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.read(BlockReaderRemote.java:168)
org.apache.hadoop.hdfs.ByteBufferStrategy.readFromBlock(ReaderStrategy.java:189)
org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:706)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:767)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 315", daemon: true
sun.nio.ch.NativeThread.current(Native method)
sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:326)
org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57)
org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118)
java.io.FilterInputStream.read(FilterInputStream.java:83)
org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:493)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.newBlockReader(BlockReaderRemote.java:414)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReader(BlockReaderFactory.java:856)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReaderFromTcp(BlockReaderFactory.java:752)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.build(BlockReaderFactory.java:386)
org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:645)
org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:575)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:758)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Finalizer", daemon: true
java.lang.Object.wait(Native method)
java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:143)
java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:164)
java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:209)


Thread name: "main", daemon: false
sun.misc.Unsafe.park(Native method)
java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1475)
org.apache.spark.rpc.netty.Dispatcher.awaitTermination(Dispatcher.scala:183)
org.apache.spark.rpc.netty.NettyRpcEnv.awaitTermination(NettyRpcEnv.scala:284)
org.apache.spark.executor.CoarseGrainedExecutorBackend$$anonfun$run$1.apply$mcV$sp(CoarseGrainedExecutorBackend.scala:227)
org.apache.spark.deploy.SparkHadoopUtil$$anon$2.run(SparkHadoopUtil.scala:63)
org.apache.spark.deploy.SparkHadoopUtil$$anon$2.run(SparkHadoopUtil.scala:62)
java.security.AccessController.doPrivileged(Native method)
javax.security.auth.Subject.doAs(Subject.java:422)
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1685)
org.apache.spark.deploy.SparkHadoopUtil.runAsSparkUser(SparkHadoopUtil.scala:62)
org.apache.spark.executor.CoarseGrainedExecutorBackend$.run(CoarseGrainedExecutorBackend.scala:184)
org.apache.spark.executor.CoarseGrainedExecutorBackend$.main(CoarseGrainedExecutorBackend.scala:280)
org.apache.spark.executor.CoarseGrainedExecutorBackend.main(CoarseGrainedExecutorBackend.scala, line not available)


3 threads, first thread name: "IPC Parameter Sending Thread #0", daemon: true

  Thread ID   Name  Daemon 
 19 "IPC Parameter Sending Thread #0"true
 92 "process reaper"true
 1 "IPC Parameter Sending Thread #1"true


sun.misc.Unsafe.park(Native method)
java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
java.util.concurrent.SynchronousQueue$TransferStack.awaitFulfill(SynchronousQueue.java:460)
java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:362)
java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:941)
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1073)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "client DomainSocketWatcher", daemon: true
org.apache.hadoop.net.unix.DomainSocketWatcher.doPoll0(Native method)
org.apache.hadoop.net.unix.DomainSocketWatcher.access$900(DomainSocketWatcher.java:52)
org.apache.hadoop.net.unix.DomainSocketWatcher$2.run(DomainSocketWatcher.java:503)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 339", daemon: true
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 435", daemon: true
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


2 threads, first thread name: "Executor task launch worker for task 387", daemon: true

  Thread ID   Name  Daemon 
 7 "Executor task launch worker for task 387"true
 27 "Executor task launch worker for task 285"true


org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.tokenize(TypeInfoUtils.java:301)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.<init>(TypeInfoUtils.java:317)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString(TypeInfoUtils.java:817)
org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe.initialize(ParquetHiveSerDe.java:103)
org.apache.hadoop.hive.serde2.AbstractSerDe.initialize(AbstractSerDe.java:58)
org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:531)
org.apache.hadoop.hive.ql.plan.TableDesc.getDeserializer(TableDesc.java:100)
org.apache.hadoop.hive.ql.plan.TableDesc.getDeserializer(TableDesc.java:91)
org.apache.hadoop.hive.ql.exec.MapOperator.getConvertedOI(MapOperator.java:308)
org.apache.hadoop.hive.ql.exec.MapOperator.setChildren(MapOperator.java:362)
org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.init(SparkMapRecordHandler.java:84)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:55)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:30)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 411", daemon: true
java.lang.Object.hashCode(Native method)
java.util.HashMap.hash(HashMap.java:338)
java.util.HashMap.get(HashMap.java:556)
org.apache.parquet.format.PageHeader.read(PageHeader.java:843)
org.apache.parquet.format.Util.read(Util.java:213)
org.apache.parquet.format.Util.readPageHeader(Util.java:65)
org.apache.parquet.hadoop.ParquetFileReader$WorkaroundChunk.readPageHeader(ParquetFileReader.java:994)
org.apache.parquet.hadoop.ParquetFileReader$Chunk.readAllPages(ParquetFileReader.java:872)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:724)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 375", daemon: true
java.nio.HeapByteBuffer.<init>(HeapByteBuffer.java:57)
java.nio.ByteBuffer.allocate(ByteBuffer.java:335)
org.apache.parquet.bytes.HeapByteBufferAllocator.allocate(HeapByteBufferAllocator.java:32)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1092)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)
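
This trace catches the allocation site itself: Parquet's HeapByteBufferAllocator delegates to ByteBuffer.allocate(), which constructs a HeapByteBuffer backed by an ordinary byte[] (its hb field), so these buffers are accounted as byte[] on the Java heap rather than as native memory. A quick JDK-only check; the 8 MB size is illustrative:

    import java.nio.ByteBuffer;

    public class HeapBufferCheck {
        public static void main(String[] args) {
            // allocate() returns a HeapByteBuffer whose backing store is a plain
            // byte[] on the heap, unlike allocateDirect(), which uses native memory.
            ByteBuffer buf = ByteBuffer.allocate(8 * 1024 * 1024);
            System.out.println("heap-backed: " + buf.hasArray());            // true
            System.out.println("backing byte[] length: " + buf.array().length);
        }
    }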


Thread name: "Executor task launch worker for task 363", daemon: true
sun.nio.ch.EPollArrayWrapper.epollWait(Native method)
sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:157)
org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.readChannelFully(PacketReceiver.java:256)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:207)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:169)
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:102)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.readNextPacket(BlockReaderRemote.java:187)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.read(BlockReaderRemote.java:168)
org.apache.hadoop.hdfs.ByteBufferStrategy.readFromBlock(ReaderStrategy.java:189)
org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:706)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:767)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 288", daemon: true
java.nio.channels.spi.AbstractInterruptibleChannel.begin(AbstractInterruptibleChannel.java:157)
sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:631)
org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:192)
org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
org.apache.hadoop.hdfs.DFSClient.newConnectedPeer(DFSClient.java:2837)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.nextTcpPeer(BlockReaderFactory.java:824)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReaderFromTcp(BlockReaderFactory.java:749)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.build(BlockReaderFactory.java:386)
org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:645)
org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:575)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:758)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


2 threads, first thread name: "Executor task launch worker for task 289", daemon: true

  Thread ID   Name  Daemon 
 23 "Executor task launch worker for task 289"true
 41 "Executor task launch worker for task 271"true


org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.tokenize(TypeInfoUtils.java:302)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.<init>(TypeInfoUtils.java:317)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString(TypeInfoUtils.java:817)
org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe.initialize(ParquetHiveSerDe.java:103)
org.apache.hadoop.hive.serde2.AbstractSerDe.initialize(AbstractSerDe.java:58)
org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:531)
org.apache.hadoop.hive.ql.plan.PartitionDesc.getDeserializer(PartitionDesc.java:174)
org.apache.hadoop.hive.ql.exec.MapOperator.getConvertedOI(MapOperator.java:293)
org.apache.hadoop.hive.ql.exec.MapOperator.setChildren(MapOperator.java:362)
org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.init(SparkMapRecordHandler.java:84)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:55)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:30)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)
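
These two workers are initializing the Parquet SerDe; the top frame is Hive tokenizing the table's column-type string. A minimal sketch of that parsing step, assuming hive-serde is on the classpath; the type string here is invented for illustration:

    import java.util.List;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class TypeStringParse {
        public static void main(String[] args) {
            // getTypeInfosFromTypeString() drives the TypeInfoParser seen above;
            // every call tokenizes the full type string from scratch.
            List<TypeInfo> types =
                TypeInfoUtils.getTypeInfosFromTypeString("bigint,string,array<double>");
            for (TypeInfo t : types) {
                System.out.println(t.getTypeName());
            }
        }
    }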


Thread name: "Executor task launch worker for task 268", daemon: true
com.google.protobuf.CodedInputStream.<init>(CodedInputStream.java:573)
com.google.protobuf.CodedInputStream.newInstance(CodedInputStream.java:55)
com.google.protobuf.AbstractParser.parsePartialFrom(AbstractParser.java:199)
com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:217)
com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:223)
com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:49)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BlockOpResponseProto.parseFrom(DataTransferProtos.java:23190)
org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.newBlockReader(BlockReaderRemote.java:413)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReader(BlockReaderFactory.java:856)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.getRemoteBlockReaderFromTcp(BlockReaderFactory.java:752)
org.apache.hadoop.hdfs.client.impl.BlockReaderFactory.build(BlockReaderFactory.java:386)
org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:645)
org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:575)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:758)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:843)
org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:147)
org.apache.parquet.hadoop.util.H2SeekableInputStream$H2Reader.read(H2SeekableInputStream.java:95)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:104)
org.apache.parquet.hadoop.util.H2SeekableInputStream.readFully(H2SeekableInputStream.java:89)
org.apache.parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:1093)
org.apache.parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:722)
org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:126)
org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:212)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:101)
org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper.<init>(ParquetRecordReaderWrapper.java:63)
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat.getRecordReader(MapredParquetInputFormat.java:75)
org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.<init>(CombineHiveRecordReader.java:68)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:423)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.initNextRecordReader(HadoopShimsSecure.java:257)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.<init>(HadoopShimsSecure.java:217)
org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileInputFormatShim.getRecordReader(HadoopShimsSecure.java:345)
org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getRecordReader(CombineHiveInputFormat.java:702)
org.apache.spark.rdd.HadoopRDD$$anon$1.liftedTree1$1(HadoopRDD.scala:251)
org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:250)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:208)
org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Executor task launch worker for task 327", daemon: true
java.util.HashMap.hash(HashMap.java:338)
java.util.HashMap.get(HashMap.java:556)
org.apache.hadoop.hive.ql.exec.MapOperator.getConvertedOI(MapOperator.java:305)
org.apache.hadoop.hive.ql.exec.MapOperator.setChildren(MapOperator.java:362)
org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.init(SparkMapRecordHandler.java:84)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:55)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:30)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "Reference Handler", daemon: true
java.lang.ref.Reference.tryHandlePending(Reference.java:178)
java.lang.ref.Reference$ReferenceHandler.run(Reference.java:153)


Thread name: "Executor task launch worker for task 284", daemon: true
java.lang.String.substring(String.java:1969)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.tokenize(TypeInfoUtils.java:305)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.<init>(TypeInfoUtils.java:317)
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString(TypeInfoUtils.java:817)
org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe.initialize(ParquetHiveSerDe.java:103)
org.apache.hadoop.hive.serde2.AbstractSerDe.initialize(AbstractSerDe.java:58)
org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:531)
org.apache.hadoop.hive.ql.plan.PartitionDesc.getDeserializer(PartitionDesc.java:174)
org.apache.hadoop.hive.ql.exec.MapOperator.getConvertedOI(MapOperator.java:293)
org.apache.hadoop.hive.ql.exec.MapOperator.setChildren(MapOperator.java:362)
org.apache.hadoop.hive.ql.exec.spark.SparkMapRecordHandler.init(SparkMapRecordHandler.java:84)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:55)
org.apache.hadoop.hive.ql.exec.spark.HiveMapFunction.call(HiveMapFunction.java:30)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$7$1.apply(JavaRDDLike.scala:186)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
org.apache.spark.scheduler.Task.run(Task.scala:108)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)


Thread name: "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner", daemon: true
java.lang.Object.wait(Native method)
java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:143)
java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:164)
org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:3747)
java.lang.Thread.run(Thread.java:748)


Thread name: "IPC Client (1664403706) connection to vc0501.halxg.cloudera.com/10.17.211.11:8020 from hive", daemon: true
java.lang.Object.wait(Native method)
org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1016)
org.apache.hadoop.ipc.Client$Connection.run(Client.java:1060)

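The thread listing above was extracted from the heap dump by the analysis tool. For orientation only, the same name / daemon-flag / stack-frame information can be collected from a live JVM via the standard Thread.getAllStackTraces() API; the sketch below is illustrative (the class name and output shape are assumptions, not JXRay's own code):

import java.util.Map;

// Minimal sketch: print every live thread in the same
// "Thread name ... daemon ... frames" shape used by the listing above.
public class ThreadListingSketch {
    public static void main(String[] args) {
        Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
        for (Map.Entry<Thread, StackTraceElement[]> entry : all.entrySet()) {
            Thread t = entry.getKey();
            System.out.printf("Thread name: \"%s\", daemon: %b%n",
                    t.getName(), t.isDaemon());
            for (StackTraceElement frame : entry.getValue()) {
                System.out.println(frame); // e.g. java.lang.Thread.run(Thread.java:748)
            }
            System.out.println();
        }
    }
}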




17. System Properties (result of java.lang.System.getProperties())

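Each row below pairs a property key with its value, as returned by java.lang.System.getProperties(). A minimal sketch of an equivalent dump from a live JVM follows (the class name, sorting, and two-space formatting are illustrative assumptions):

import java.util.Properties;
import java.util.TreeMap;

// Minimal sketch: dump all system properties as sorted "key  value" pairs.
public class SystemPropertiesSketch {
    public static void main(String[] args) {
        Properties props = System.getProperties();
        TreeMap<String, String> sorted = new TreeMap<>();
        for (String name : props.stringPropertyNames()) {
            sorted.put(name, props.getProperty(name));
        }
        // Two-space separator mirrors the Key/Value table below.
        sorted.forEach((key, value) -> System.out.println(key + "  " + value));
    }
}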
  Key  Value 
 awt.toolkit  sun.awt.X11.XToolkit
 file.encoding  ANSI_X3.4-1968
 file.encoding.pkg  sun.io
 file.separator  /
 hive.spark.log.dir  /opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/spark/logs/
 java.awt.graphicsenv  sun.awt.X11GraphicsEnvironment
 java.awt.printerjob  sun.print.PSPrinterJob
 java.class.path/data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021:/data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021/__spark_conf__:/data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021/__spark_libs__/*:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-pool-1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-client-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/janino-3.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-mllib_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerby-asn1-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/validation-api-1.1.0.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/geronimo-jcache_1.0_spec-1.0-alpha-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javax.ws.rs-api-2.0.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/gson-2.2.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-buffer-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/json-smart-2.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/libfb303-0.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/mssql-jdbc-6.2.1.jre7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-container-servlet-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-hdfs-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/re2j-1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jodd-core-3.5.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/snappy-java-1.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/univocity-parsers-2.2.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-math3-3.4.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-azure-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/l
ib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kafka_2.11-1.0.1-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/curator-framework-2.7.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jetty-xml-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-tags_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/chill-java-0.8.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/azure-storage-5.4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-jvm-3.1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-sql_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/java-xmlbuilder-0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/arpack_combined_all-0.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/ivy-2.4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/objenesis-2.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scala-library-2.11.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerby-config-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-lang-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/json4s-ast_2.11-3.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-core-3.1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-3.10.5.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/ehcache-3.3.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-container-servlet-core-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/zookeeper.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-dataformat-cbor-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scala-xml_2.11-1.0.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/minlog-1.3.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jtransforms-2.4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6
.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/flume-ng-core-1.8.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/opencsv-2.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/accessors-smart-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/htrace-core4-4.1.0-incubating.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-hadoop-cloud_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-graphite-3.1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-admin-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-jaxrs-base-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-beanutils-1.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-databind-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-identity-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scala-parser-combinators_2.11-1.0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/avro-ipc.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jsr305-1.3.9.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jaxb-api-2.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/flume-ng-sdk-1.8.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/flume-ng-configuration-1.8.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-json-3.1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-yarn-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-server-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-core-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-guava-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-media-jaxb-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jetty-webapp-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-grap
hx_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-simplekdc-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-aws.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-codec-http-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scala-reflect-2.11.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/compress-lzf-1.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/guice-4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-io-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jsch-0.1.54.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/derby-10.11.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jul-to-slf4j-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-column.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/osgi-resource-locator-1.0.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/slf4j-api-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-module-paranamer-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-repl_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-core_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jcl-over-slf4j-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-sql-kafka-0-10_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/mina-core-2.0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/HikariCP-java7-2.4.12.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/okio-1.6.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/aws-java-sdk-bundle-1.11.271.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/xz-1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-streaming-flume-sink_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/aopalliance-1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.
cdh6.x.p0.435002/lib/spark/jars/jackson-core-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/leveldbjni-all-1.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/stax2-api-3.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/guice-servlet-4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scalap-2.11.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javassist-3.18.1-GA.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javax.inject-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/woodstox-core-5.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-sketch_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/avro.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/slf4j-log4j12-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-all-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spire_2.11-0.13.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-module-scala_2.11-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javax.annotation-api-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-codec-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerby-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-openstack-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-collections-3.2.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/audience-annotations-0.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-servlet-2.2.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/avro-mapred-hadoop2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jets3t-0.9.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-mapper-asl-1.9.13-cloudera.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-catalyst_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-annotations.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib
/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-configuration2-2.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/guava-11.0.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-net-2.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-compiler-3.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/curator-client-2.7.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/scala-compiler-2.11.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/antlr4-runtime-4.5.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-avro_2.11-3.2.1-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-streaming-flume_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/core-1.1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-mllib-local_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/okhttp-2.7.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-hadoop.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kafka-clients-1.0.1-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-mapreduce-client-core.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/zkclient-0.10.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/RoaringBitmap-0.5.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jetty-jmx-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-common-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-yarn-server-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/nimbus-jose-jwt-4.41.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/breeze_2.11-0.13.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/oro-2.0.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jline-2.12.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/chill_2.11-0.8.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-codec-1.10.jar:/opt/cloudera/parcels/CDH-
6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jersey-client-2.22.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-launcher_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-resolver-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-module-jaxb-annotations-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/joda-time-2.9.9.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/xbean-asm5-shaded-4.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-auth.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kryo-shaded-3.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerby-xdr-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/httpclient-4.5.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hk2-locator-2.4.0-b34.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jetty-util-ajax-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-format.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-lineage_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-hive_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-mapreduce-client-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-yarn_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-yarn-server-web-proxy.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-core-asl-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/curator-recipes-2.7.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-server-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-mapreduce-client-jobclient.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-cli-1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hk2-api-2.4.0-b34.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-annotations-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CD
H-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-transport-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/py4j-0.10.7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/azure-keyvault-core-0.8.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-common-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/aopalliance-repackaged-2.4.0-b34.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javax.inject-2.4.0-b34.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/stream-2.7.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/metrics-core-2.2.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-compress-1.4.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/json4s-jackson_2.11-3.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/paranamer-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/machinist_2.11-0.6.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/json4s-core_2.11-3.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-common-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jackson-jaxrs-json-provider-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/macro-compat_2.11-1.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/log4j-1.2.17.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/lz4-1.3.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-streaming_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/datanucleus-core-4.1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/protobuf-java-2.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/breeze-macros_2.11-0.13.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/netty-handler-4.1.17.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/annotations-2.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-lang3-3.7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/li
b/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerb-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/pyrolite-4.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/libthrift-0.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-network-common_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jsp-api-2.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hk2-utils-2.4.0-b34.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/httpcore-4.4.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/jcip-annotations-1.0-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-unsafe_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-network-shuffle_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-yarn-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-jackson.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/parquet-encoding.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/kerby-pkix-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/lz4-java-1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/javax.servlet-api-3.1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/commons-logging-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/shapeless_2.11-2.3.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spark-streaming-kafka-0-10_2.11-2.2.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/spire-macros_2.11-0.13.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/jars/hadoop-yarn-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/spark/hive/hive-exec-2.1.1-cdh6.x-20180621.120800-1.jar:/etc/hadoop/conf.cloudera.YARN-1:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-common-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-aws.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-nfs.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-azure-datalake.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-annotations.jar:/opt/cloudera/parcels/CD
H-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-kms.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-auth.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-kms-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-jackson.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-encoding.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-scala_2.10.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-protobuf.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-cascading3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-format.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-hadoop.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-generator.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-thrift.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-hadoop-bundle.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-pig.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-aws-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-format-sources.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-nfs-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-pig-bundle.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-column.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-auth-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-format-javadoc.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-common-3.0.0-cdh6.x-SNAPSHOT-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-cascading.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-annotations-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/hadoop-azure-datalake-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/parquet-avro.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-client-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/json-smart-2.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jsr311-api-1.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jsr305-3.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-xml-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jaxb-impl-2.2.3-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/snappy-java-1.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-lang-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/gson-2.2.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/zookeeper.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerby-asn1-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/asm-5.0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/ac
cessors-smart-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jersey-server-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-admin-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-databind-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/metrics-core-3.0.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-server-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-core-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/curator-client-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-math3-3.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-simplekdc-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jsch-0.1.54.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/netty-3.10.5.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jul-to-slf4j-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-io-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jettison-1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-jaxrs-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-core-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jaxb-api-2.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/woodstox-core-5.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-identity-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-mapper-asl-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/aws-java-sdk-bundle-1.11.271.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/audience-annotations-0.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/xz-1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-xc-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/guava-11.0.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-collections-3.2.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-codec-1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-servlet-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/nimbus-jose-jwt-4.41.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/paranamer-2.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-webapp-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-core-asl-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/curator-framework-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-configuration2-2.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/stax2-api-3.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-beanutils-1.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jersey-servlet-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/curator-recipes-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-server-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/httpclient-4.5
.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/logredactor-2.0.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/avro.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/slf4j-api-1.7.25.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerby-config-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/javax.servlet-api-3.1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-cli-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/mockito-all-1.8.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-common-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-security-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerby-xdr-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-net-3.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerby-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-util-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/log4j-1.2.17.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-http-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/protobuf-java-2.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-lang3-3.7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerb-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jsp-api-2.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/httpcore-4.4.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jersey-core-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jcip-annotations-1.0-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/kerby-pkix-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/re2j-1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/azure-data-lake-store-sdk-2.2.9.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jersey-json-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-compress-1.4.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jackson-annotations-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/commons-logging-1.1.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/jetty-io-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/slf4j-log4j12.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/htrace-core4-4.1.0-incubating.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-native-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-native-client-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-httpfs.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-client-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-nfs.jar:/opt/cloudera/parcels/CDH-6.x
-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-native-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-client-3.0.0-cdh6.x-SNAPSHOT-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-httpfs-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-nfs-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-native-client-3.0.0-cdh6.x-SNAPSHOT-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-3.0.0-cdh6.x-SNAPSHOT-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/hadoop-hdfs-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-client-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerby-asn1-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/gson-2.2.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jsr305-3.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/json-smart-2.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/snappy-java-1.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-xml-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jaxb-impl-2.2.3-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerby-config-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jsr311-api-1.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/netty-3.10.5.Final.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/curator-client-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-lang-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jersey-server-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/accessors-smart-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/htrace-core4-4.1.0-incubating.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/asm-5.0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-beanutils-1.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-databind-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-identity-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-admin-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-server-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-codec-1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jaxb-api-2.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-webapp-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-math3-3.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-xc-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-simplekdc-1.0.0.jar:/opt/clou
dera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-io-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-core-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-util-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-jaxrs-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jettison-1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jsch-0.1.54.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-core-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/leveldbjni-all-1.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/stax2-api-3.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/woodstox-core-5.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/json-simple-1.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/xz-1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-mapper-asl-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/okio-1.6.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-collections-3.2.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/audience-annotations-0.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/avro-1.8.2-cdh6.x-20180621.114821-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-io-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-daemon-1.0.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-configuration2-2.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/guava-11.0.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerby-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/zookeeper-3.4.5-cdh6.x-20180621.114821-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/paranamer-2.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/nimbus-jose-jwt-4.41.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-servlet-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-logging-1.1.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerby-xdr-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/okhttp-2.7.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-util-ajax-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/curator-framework-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-core-asl-1.9.13.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-server-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jackson-annotations-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-security-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/curator-recipes-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jersey-servlet-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/
lib/commons-compress-1.4.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-common-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jersey-json-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/httpclient-4.5.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/re2j-1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-cli-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerby-pkix-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/javax.servlet-api-3.1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jersey-core-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-net-3.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/kerb-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jcip-annotations-1.0-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/protobuf-java-2.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/httpcore-4.4.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/log4j-1.2.17.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/jetty-http-9.3.20.v20170531.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-hdfs/lib/commons-lang3-3.7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-tests.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-applications-unmanaged-am-launcher.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-router.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-registry.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-timeline-pluginstorage.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-resourcemanager.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-applicationhistoryservice.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-sharedcachemanager.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-nodemanager.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-web-proxy.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-nodemanager-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-api-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-registry-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-resourcemanager-3.0.0-cdh6.x-SNA
PSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-applications-unmanaged-am-launcher-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-timeline-pluginstorage-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-applicationhistoryservice-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-router-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-sharedcachemanager-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-web-proxy-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/hadoop-yarn-server-tests-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/jersey-guice-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/jersey-client-1.19.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/java-util-1.9.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/jackson-jaxrs-json-provider-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/fst-2.50.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/jackson-module-jaxb-annotations-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/avro.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/javax.inject-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/guice-servlet-4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/aopalliance-1.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/HikariCP-java7-2.4.12.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/json-io-2.5.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/guice-4.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/metrics-core-3.0.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/jackson-jaxrs-base-2.9.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/zookeeper.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/ehcache-3.3.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/spark-2.2.0-cdh6.x-SNAPSHOT-yarn-shuffle.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/mssql-jdbc-6.2.1.jre7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop-yarn/lib/geronimo-jcache_1.0_spec-1.0-alpha-1.jar:/etc/hadoop/conf.cloudera.YARN-1:/data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021/mr-framework/*::/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../kudu/kudu-spark2_2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/accessors-smart-1.2.jar:/opt/clo
udera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/accessors-smart.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/asm-5.0.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/asm.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/avro.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/aws-java-sdk-bundle-1.11.271.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/aws-java-sdk-bundle.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/azure-data-lake-store-sdk-2.2.9.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/azure-data-lake-store-sdk.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-beanutils-1.9.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-beanutils.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-cli-1.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-cli.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-codec-1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-codec.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-collections-3.2.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-collections.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-compress-1.4.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-compress.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-configuration2-2.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-configuration2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-io-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-io.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-lang-2.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-lang.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-lang3-3.7.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-lang3.jar:/opt/cloudera/parcels/CDH
-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-logging-1.1.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-logging.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-math3-3.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-math3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-net-3.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/commons-net.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-client-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-framework-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-framework.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-recipes-2.12.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/curator-recipes.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/gson-2.2.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/gson.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/guava-11.0.2.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/guava.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-annotations-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-annotations.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-auth-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-auth.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-aws-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-aws.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-azure-datalake-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-azure-datalake.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-com
mon.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-hdfs-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-hdfs-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-core-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-core.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-jobclient-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-mapreduce-client-jobclient.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-api-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-client-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-common-3.0.0-cdh6.x-SNAPSHOT.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/hadoop-yarn-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/htrace-core4-4.1.0-incubating.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/htrace-core4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/httpclient-4.5.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/httpclient.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/httpcore-4.4.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/httpcore.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jaxb-api-2.2.11.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jaxb-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jcip-annotations-1.0-1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jcip-annotations.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../.
./CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/json-smart-2.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/json-smart.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsp-api-2.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsp-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsr305-3.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsr305.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsr311-api-1.1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/jsr311-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-admin-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-admin.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-client-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-client.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-common-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-common.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-core-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-core.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-crypto.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-identity-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-identity.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-server-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-server.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-simplekdc-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-simplekdc.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerb-util.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-asn1-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/ha
doop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-asn1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-config-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-config.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-pkix-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-pkix.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-util.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-xdr-1.0.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/kerby-xdr.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/log4j-1.2.17.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/log4j.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/nimbus-jose-jwt-4.41.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/nimbus-jose-jwt.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/okhttp-2.7.5.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/okhttp.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/okio-1.6.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/okio.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/paranamer-2.8.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/paranamer.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/protobuf-java-2.5.0.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/protobuf-java.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/re2j-1.1.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/re2j.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/snappy-java-1.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/snappy-java.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/stax2-api-3.1.4.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/stax2-api.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-
1.cdh6.x.p0.435002/lib/hadoop/client/woodstox-core-5.0.3.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/woodstox-core.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/xz-1.6.jar:/opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/client/xz.jar
 java.class.version  52.0
 java.endorsed.dirs  /usr/java/jdk1.8.0_141/jre/lib/endorsed
 java.ext.dirs  /usr/java/jdk1.8.0_141/jre/lib/ext:/usr/java/packages/lib/ext
 java.home  /usr/java/jdk1.8.0_141/jre
 java.io.tmpdir  /data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021/tmp
 java.library.path  /opt/cloudera/parcels/CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/../../../CDH-6.x-1.cdh6.x.p0.435002/lib/hadoop/lib/native::/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
 java.runtime.name  Java(TM) SE Runtime Environment
 java.runtime.version  1.8.0_141-b15
 java.specification.name  Java Platform API Specification
 java.specification.vendor  Oracle Corporation
 java.specification.version  1.8
 java.vendor  Oracle Corporation
 java.vendor.url  http://java.oracle.com/
 java.vendor.url.bug  http://bugreport.sun.com/bugreport/
 java.version  1.8.0_141
 java.vm.info  mixed mode
 java.vm.name  Java HotSpot(TM) 64-Bit Server VM
 java.vm.specification.name  Java Virtual Machine Specification
 java.vm.specification.vendor  Oracle Corporation
 java.vm.specification.version  1.8
 java.vm.vendor  Oracle Corporation
 java.vm.version  25.141-b15
 line.separator  \n (newline)
 os.arch  amd64
 os.name  Linux
 os.version  3.10.0-693.21.1.el7.x86_64
 path.separator  :
 spark.authenticate  false
 spark.driver.port  38660
 spark.hadoop.hbase.master.info.port  16010
 spark.hadoop.hbase.master.port  16000
 spark.hadoop.hbase.regionserver.info.port  16030
 spark.hadoop.hbase.regionserver.port  16020
 spark.hadoop.hbase.rest.port  8080
 spark.hadoop.hbase.status.multicast.address.port  16100
 spark.network.crypto.enabled  false
 spark.shuffle.service.port  7337
 spark.ui.port  0
 spark.yarn.app.container.log.dir  /data/2/yarn/container-logs/application_1529601638228_0031/container_1529601638228_0031_01_000021
 sun.arch.data.model  64
 sun.boot.class.path  /usr/java/jdk1.8.0_141/jre/lib/resources.jar:/usr/java/jdk1.8.0_141/jre/lib/rt.jar:/usr/java/jdk1.8.0_141/jre/lib/sunrsasign.jar:/usr/java/jdk1.8.0_141/jre/lib/jsse.jar:/usr/java/jdk1.8.0_141/jre/lib/jce.jar:/usr/java/jdk1.8.0_141/jre/lib/charsets.jar:/usr/java/jdk1.8.0_141/jre/lib/jfr.jar:/usr/java/jdk1.8.0_141/jre/classes
 sun.boot.library.path  /usr/java/jdk1.8.0_141/jre/lib/amd64
 sun.cpu.endian  little
 sun.cpu.isalist  (empty)
 sun.io.unicode.encoding  UnicodeLittle
 sun.java.command  org.apache.spark.executor.CoarseGrainedExecutorBackend --driver-url spark://CoarseGrainedScheduler@10.17.211.19:38660 --executor-id 11 --hostname vc0519.halxg.cloudera.com --cores 40 --app-id application_1529601638228_0031 --user-class-path file:/data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021/__app__.jar
 sun.java.launcher  SUN_STANDARD
 sun.jnu.encoding  ANSI_X3.4-1968
 sun.management.compiler  HotSpot 64-Bit Tiered Compilers
 sun.nio.ch.bugLevel  (empty)
 sun.os.patch.level  unknown
 user.country  US
 user.dir  /data/3/yarn/nm/usercache/hive/appcache/application_1529601638228_0031/container_1529601638228_0031_01_000021
 user.home  /var/lib/hadoop-yarn
 user.language  en
 user.name  yarn
 user.timezone  America/Los_Angeles
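
Note: the key/value pairs above are the JVM system properties captured in the heap dump, i.e. the contents of the table behind java.lang.System.getProperties() in the Spark executor process. Below is a minimal sketch of how the same table can be printed from a live JVM to cross-check the captured values; the class name DumpProperties is hypothetical and is not part of the JXRay report, Spark, or Hadoop.

import java.util.TreeMap;

// Minimal sketch: print all JVM system properties in sorted order,
// mirroring the key/value table above.
public class DumpProperties {
    public static void main(String[] args) {
        TreeMap<String, String> sorted = new TreeMap<>();
        for (String key : System.getProperties().stringPropertyNames()) {
            sorted.put(key, System.getProperty(key));
        }
        sorted.forEach((k, v) -> System.out.println(k + "  " + v));

        // java.class.path is a single path.separator-delimited value
        // (':' on Linux, per the table above); splitting it shows how
        // many classpath entries the process was started with.
        String cp = System.getProperty("java.class.path", "");
        int entries = cp.isEmpty() ? 0 : cp.split(java.io.File.pathSeparator).length;
        System.out.println("classpath entries: " + entries);
    }
}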