[OOZIE-3138] TestSparkMain.testMain UT fails


Details

    • Type: Test
    • Status: Open
    • Priority: Major
    • Resolution: Unresolved
    • Affects Version/s: 4.2.0
    • Fix Version/s: None
    • Component/s: tests
    • Labels: None

    Description

      The TestSparkMain.testMain UT fails with the following error:

      Error Message
      
      java.lang.RuntimeException: Unable to instantiate org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient
      Stacktrace
      
      com.google.common.util.concurrent.UncheckedExecutionException: java.lang.RuntimeException: Unable to instantiate org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient
      	at com.google.common.cache.LocalCache$Segment.get(LocalCache.java:2263)
      	at com.google.common.cache.LocalCache.get(LocalCache.java:4000)
      	at com.google.common.cache.LocalCache$LocalManualCache.get(LocalCache.java:4789)
      	at org.apache.hive.hcatalog.common.HiveClientCache.getOrCreate(HiveClientCache.java:291)
      	at org.apache.hive.hcatalog.common.HiveClientCache.get(HiveClientCache.java:273)
      	at org.apache.hive.hcatalog.common.HCatUtil.getHiveMetastoreClient(HCatUtil.java:558)
      	at org.apache.hive.hcatalog.api.HCatClientHMSImpl.initialize(HCatClientHMSImpl.java:823)
      	at org.apache.hive.hcatalog.api.HCatClient.create(HCatClient.java:71)
      	at org.apache.oozie.test.MiniHCatServer.start(MiniHCatServer.java:87)
      	at org.apache.oozie.test.XTestCase.setupHCatalogServer(XTestCase.java:998)
      	at org.apache.oozie.test.XTestCase.setUp(XTestCase.java:405)
      	at org.apache.oozie.test.XTestCase.setUp(XTestCase.java:281)
      	at org.apache.oozie.test.XFsTestCase.setUp(XFsTestCase.java:61)
      	at junit.framework.TestCase.runBare(TestCase.java:132)
      	at junit.framework.TestResult$1.protect(TestResult.java:110)
      	at junit.framework.TestResult.runProtected(TestResult.java:128)
      	at junit.framework.TestResult.run(TestResult.java:113)
      	at junit.framework.TestCase.run(TestCase.java:124)
      	at junit.framework.TestSuite.runTest(TestSuite.java:243)
      	at junit.framework.TestSuite.run(TestSuite.java:238)
      	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
      	at org.junit.runners.Suite.runChild(Suite.java:128)
      	at org.junit.runners.Suite.runChild(Suite.java:24)
      	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:231)
      	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
      	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
      	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
      	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
      	at java.lang.Thread.run(Thread.java:745)
      Caused by: java.lang.RuntimeException: Unable to instantiate org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient
      	at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1566)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.<init>(RetryingMetaStoreClient.java:92)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:138)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:124)
      	at org.apache.hive.hcatalog.common.HiveClientCache$5.call(HiveClientCache.java:295)
      	at org.apache.hive.hcatalog.common.HiveClientCache$5.call(HiveClientCache.java:291)
      	at com.google.common.cache.LocalCache$LocalManualCache$1.load(LocalCache.java:4792)
      	at com.google.common.cache.LocalCache$LoadingValueReference.loadFuture(LocalCache.java:3599)
      	at com.google.common.cache.LocalCache$Segment.loadSync(LocalCache.java:2379)
      	at com.google.common.cache.LocalCache$Segment.lockedGetOrLoad(LocalCache.java:2342)
      	at com.google.common.cache.LocalCache$Segment.get(LocalCache.java:2257)
      	... 28 more
      Caused by: java.lang.reflect.InvocationTargetException
      	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
      	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
      	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
      	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
      	at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1564)
      	... 38 more
      Caused by: MetaException(message:Could not connect to meta store using any of the URIs provided. Most recent failure: org.apache.thrift.transport.TTransportException: java.net.ConnectException: Connection refused
      	at org.apache.thrift.transport.TSocket.open(TSocket.java:187)
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.open(HiveMetaStoreClient.java:487)
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:282)
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:188)
      	at org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient.<init>(HiveClientCache.java:406)
      	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
      	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
      	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
      	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
      	at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1564)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.<init>(RetryingMetaStoreClient.java:92)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:138)
      	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:124)
      	at org.apache.hive.hcatalog.common.HiveClientCache$5.call(HiveClientCache.java:295)
      	at org.apache.hive.hcatalog.common.HiveClientCache$5.call(HiveClientCache.java:291)
      	at com.google.common.cache.LocalCache$LocalManualCache$1.load(LocalCache.java:4792)
      	at com.google.common.cache.LocalCache$LoadingValueReference.loadFuture(LocalCache.java:3599)
      	at com.google.common.cache.LocalCache$Segment.loadSync(LocalCache.java:2379)
      	at com.google.common.cache.LocalCache$Segment.lockedGetOrLoad(LocalCache.java:2342)
      	at com.google.common.cache.LocalCache$Segment.get(LocalCache.java:2257)
      	at com.google.common.cache.LocalCache.get(LocalCache.java:4000)
      	at com.google.common.cache.LocalCache$LocalManualCache.get(LocalCache.java:4789)
      	at org.apache.hive.hcatalog.common.HiveClientCache.getOrCreate(HiveClientCache.java:291)
      	at org.apache.hive.hcatalog.common.HiveClientCache.get(HiveClientCache.java:273)
      	at org.apache.hive.hcatalog.common.HCatUtil.getHiveMetastoreClient(HCatUtil.java:558)
      	at org.apache.hive.hcatalog.api.HCatClientHMSImpl.initialize(HCatClientHMSImpl.java:823)
      	at org.apache.hive.hcatalog.api.HCatClient.create(HCatClient.java:71)
      	at org.apache.oozie.test.MiniHCatServer.start(MiniHCatServer.java:87)
      	at org.apache.oozie.test.XTestCase.setupHCatalogServer(XTestCase.java:998)
      	at org.apache.oozie.test.XTestCase.setUp(XTestCase.java:405)
      	at org.apache.oozie.test.XTestCase.setUp(XTestCase.java:281)
      	at org.apache.oozie.test.XFsTestCase.setUp(XFsTestCase.java:61)
      	at junit.framework.TestCase.runBare(TestCase.java:132)
      	at junit.framework.TestResult$1.protect(TestResult.java:110)
      	at junit.framework.TestResult.runProtected(TestResult.java:128)
      	at junit.framework.TestResult.run(TestResult.java:113)
      	at junit.framework.TestCase.run(TestCase.java:124)
      	at junit.framework.TestSuite.runTest(TestSuite.java:243)
      	at junit.framework.TestSuite.run(TestSuite.java:238)
      	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
      	at org.junit.runners.Suite.runChild(Suite.java:128)
      	at org.junit.runners.Suite.runChild(Suite.java:24)
      	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:231)
      	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
      	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
      	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
      	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
      	at java.lang.Thread.run(Thread.java:745)
      Caused by: java.net.ConnectException: Connection refused
      	at java.net.PlainSocketImpl.socketConnect(Native Method)
      	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
      	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
      	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
      	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
      	at java.net.Socket.connect(Socket.java:589)
      	at org.apache.thrift.transport.TSocket.open(TSocket.java:182)
      	... 47 more
      )
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.open(HiveMetaStoreClient.java:534)
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:282)
      	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:188)
      	at org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient.<init>(HiveClientCache.java:406)
      	... 43 more
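      The trace shows the failure happening in test setup, not in the test body: XTestCase.setupHCatalogServer starts MiniHCatServer, which calls HCatClient.create, and the Thrift connection to the embedded metastore is refused (java.net.ConnectException: Connection refused), i.e. the metastore is not yet accepting connections when the client connects. As a hedged illustration only, one way a test harness could work around such a startup race is to retry the client creation; this is a sketch, not the Oozie fix, and the class, method, and constant names below are hypothetical:

      // Hypothetical sketch: retry HCatClient.create until the metastore Thrift
      // server accepts connections. MetastoreClientRetry, createClientWithRetry,
      // MAX_ATTEMPTS and BACKOFF_MS are illustrative names, not Oozie code.
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hive.hcatalog.api.HCatClient;

      public final class MetastoreClientRetry {
          private static final int MAX_ATTEMPTS = 10;   // assumed retry limit
          private static final long BACKOFF_MS = 1000L; // assumed fixed backoff

          static HCatClient createClientWithRetry(Configuration conf) throws Exception {
              Exception last = null;
              for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
                  try {
                      // Throws while the Thrift server is still starting; per the trace
                      // the failure can surface as an unchecked exception, so catch broadly.
                      return HCatClient.create(conf);
                  } catch (Exception e) {
                      last = e;
                      Thread.sleep(BACKOFF_MS); // wait before the next attempt
                  }
              }
              throw last; // all attempts exhausted; rethrow the last failure
          }

          private MetastoreClientRetry() { }
      }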
      Standard Output
      
      Setting testcase work dir[/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/test-data/oozietests/org.apache.oozie.action.hadoop.TestSparkMain/testMain]
      2017-11-28 15:41:48,529 WARN  [pool-1-thread-1] conf.Configuration (Configuration.java:<clinit>(664)) - DEPRECATED: hadoop-site.xml found in the classpath. Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, mapred-site.xml and hdfs-site.xml to override properties of core-default.xml, mapred-default.xml and hdfs-default.xml respectively
      2017-11-28 15:41:48,961 INFO  [pool-1-thread-1] Configuration.deprecation (Configuration.java:warnOnceIfDeprecated(1194)) - mapred.tasktracker.map.tasks.maximum is deprecated. Instead, use mapreduce.tasktracker.map.tasks.maximum
      2017-11-28 15:41:48,962 INFO  [pool-1-thread-1] Configuration.deprecation (Configuration.java:warnOnceIfDeprecated(1194)) - mapred.tasktracker.reduce.tasks.maximum is deprecated. Instead, use mapreduce.tasktracker.reduce.tasks.maximum
      2017-11-28 15:41:49,139 WARN  [pool-1-thread-1] util.NativeCodeLoader (NativeCodeLoader.java:<clinit>(62)) - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
      Formatting using clusterid: testClusterID
      2017-11-28 15:41:49,576 INFO  [pool-1-thread-1] namenode.FSEditLog (FSEditLog.java:newInstance(225)) - Edit logging is async:false
      2017-11-28 15:41:49,582 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(758)) - No KeyProvider found.
      2017-11-28 15:41:49,583 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(768)) - fsLock is fair:true
      2017-11-28 15:41:49,644 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:49,653 INFO  [pool-1-thread-1] Configuration.deprecation (Configuration.java:warnOnceIfDeprecated(1194)) - hadoop.configured.node.mapping is deprecated. Instead, use net.topology.configured.node.mapping
      2017-11-28 15:41:49,653 INFO  [pool-1-thread-1] blockmanagement.DatanodeManager (DatanodeManager.java:<init>(274)) - dfs.block.invalidate.limit=1000
      2017-11-28 15:41:49,653 INFO  [pool-1-thread-1] blockmanagement.DatanodeManager (DatanodeManager.java:<init>(280)) - dfs.namenode.datanode.registration.ip-hostname-check=true
      2017-11-28 15:41:49,658 INFO  [pool-1-thread-1] blockmanagement.BlockManager (InvalidateBlocks.java:printBlockDeletionTime(71)) - dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
      2017-11-28 15:41:49,660 INFO  [pool-1-thread-1] blockmanagement.BlockManager (InvalidateBlocks.java:printBlockDeletionTime(76)) - The block deletion will start around 2017 Nov 28 15:41:49
      2017-11-28 15:41:49,664 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map BlocksMap
      2017-11-28 15:41:49,664 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:49,667 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 2.0% max memory 981.5 MB = 19.6 MB
      2017-11-28 15:41:49,668 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^21 = 2097152 entries
      2017-11-28 15:41:49,697 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:createBlockTokenSecretManager(409)) - dfs.block.access.token.enable=false
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(395)) - defaultReplication         = 2
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(396)) - maxReplication             = 512
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(397)) - minReplication             = 1
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(398)) - maxReplicationStreams      = 2
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(399)) - replicationRecheckInterval = 3000
      2017-11-28 15:41:49,701 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(400)) - encryptDataTransfer        = false
      2017-11-28 15:41:49,702 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(401)) - maxNumBlocksToLog          = 1000
      2017-11-28 15:41:49,754 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(789)) - fsOwner             = nobody (auth:SIMPLE)
      2017-11-28 15:41:49,754 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(790)) - supergroup          = supergroup
      2017-11-28 15:41:49,754 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(791)) - isPermissionEnabled = true
      2017-11-28 15:41:49,755 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(802)) - HA Enabled: false
      2017-11-28 15:41:49,759 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(839)) - Append Enabled: true
      2017-11-28 15:41:49,995 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map INodeMap
      2017-11-28 15:41:49,995 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:49,996 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 1.0% max memory 981.5 MB = 9.8 MB
      2017-11-28 15:41:49,996 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^20 = 1048576 entries
      2017-11-28 15:41:49,999 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(256)) - ACLs enabled? false
      2017-11-28 15:41:50,000 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(260)) - XAttrs enabled? true
      2017-11-28 15:41:50,000 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(268)) - Maximum size of an xattr: 16384
      2017-11-28 15:41:50,000 INFO  [pool-1-thread-1] namenode.NameNode (FSDirectory.java:<init>(321)) - Caching file names occuring more than 10 times
      2017-11-28 15:41:50,013 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map cachedBlocks
      2017-11-28 15:41:50,013 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:50,014 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 0.25% max memory 981.5 MB = 2.5 MB
      2017-11-28 15:41:50,014 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^18 = 262144 entries
      2017-11-28 15:41:50,016 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5574)) - dfs.namenode.safemode.threshold-pct = 0.9990000128746033
      2017-11-28 15:41:50,017 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5575)) - dfs.namenode.safemode.min.datanodes = 0
      2017-11-28 15:41:50,017 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5576)) - dfs.namenode.safemode.extension     = 0
      2017-11-28 15:41:50,023 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(76)) - NNTop conf: dfs.namenode.top.window.num.buckets = 10
      2017-11-28 15:41:50,023 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(78)) - NNTop conf: dfs.namenode.top.num.users = 10
      2017-11-28 15:41:50,023 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(80)) - NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
      2017-11-28 15:41:50,091 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:initRetryCache(967)) - Retry cache on namenode is enabled
      2017-11-28 15:41:50,092 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:initRetryCache(975)) - Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
      2017-11-28 15:41:50,096 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map NameNodeRetryCache
      2017-11-28 15:41:50,096 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:50,096 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 0.029999999329447746% max memory 981.5 MB = 301.5 KB
      2017-11-28 15:41:50,096 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^15 = 32768 entries
      2017-11-28 15:41:50,180 INFO  [pool-1-thread-1] namenode.FSImage (FSImage.java:format(153)) - Allocated new BlockPoolId: BP-1089370620-172.27.58.198-1511883710117
      2017-11-28 15:41:50,195 INFO  [pool-1-thread-1] common.Storage (NNStorage.java:format(566)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1 has been successfully formatted.
      2017-11-28 15:41:50,197 INFO  [pool-1-thread-1] common.Storage (NNStorage.java:format(566)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2 has been successfully formatted.
      2017-11-28 15:41:50,214 INFO  [FSImageSaver for /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2 of type IMAGE_AND_EDITS] namenode.FSImageFormatProtobuf (FSImageFormatProtobuf.java:save(413)) - Saving image file /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2/current/fsimage.ckpt_0000000000000000000 using no compression
      2017-11-28 15:41:50,214 INFO  [FSImageSaver for /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1 of type IMAGE_AND_EDITS] namenode.FSImageFormatProtobuf (FSImageFormatProtobuf.java:save(413)) - Saving image file /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/current/fsimage.ckpt_0000000000000000000 using no compression
      2017-11-28 15:41:50,335 INFO  [FSImageSaver for /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1 of type IMAGE_AND_EDITS] namenode.FSImageFormatProtobuf (FSImageFormatProtobuf.java:save(416)) - Image file /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/current/fsimage.ckpt_0000000000000000000 of size 323 bytes saved in 0 seconds.
      2017-11-28 15:41:50,338 INFO  [FSImageSaver for /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2 of type IMAGE_AND_EDITS] namenode.FSImageFormatProtobuf (FSImageFormatProtobuf.java:save(416)) - Image file /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2/current/fsimage.ckpt_0000000000000000000 of size 323 bytes saved in 0 seconds.
      2017-11-28 15:41:50,353 INFO  [pool-1-thread-1] namenode.NNStorageRetentionManager (NNStorageRetentionManager.java:getImageTxIdToRetain(203)) - Going to retain 1 images with txid >= 0
      2017-11-28 15:41:50,357 INFO  [pool-1-thread-1] namenode.NameNode (NameNode.java:createNameNode(1624)) - createNameNode []
      2017-11-28 15:41:50,407 WARN  [pool-1-thread-1] impl.MetricsConfig (MetricsConfig.java:loadFirst(125)) - Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
      2017-11-28 15:41:50,495 INFO  [pool-1-thread-1] impl.MetricsSystemImpl (MetricsSystemImpl.java:startTimer(376)) - Scheduled snapshot period at 10 second(s).
      2017-11-28 15:41:50,496 INFO  [pool-1-thread-1] impl.MetricsSystemImpl (MetricsSystemImpl.java:start(192)) - NameNode metrics system started
      2017-11-28 15:41:50,500 INFO  [pool-1-thread-1] namenode.NameNode (NameNode.java:setClientNamenodeAddress(450)) - fs.defaultFS is hdfs://127.0.0.1:0
      2017-11-28 15:41:50,535 INFO  [org.apache.hadoop.util.JvmPauseMonitor$Monitor@77b45849] util.JvmPauseMonitor (JvmPauseMonitor.java:run(179)) - Starting JVM pause monitor
      2017-11-28 15:41:50,561 INFO  [pool-1-thread-1] hdfs.DFSUtil (DFSUtil.java:httpServerTemplateForNNAndJN(1803)) - Starting Web-server for hdfs at: http://localhost:0
      2017-11-28 15:41:50,727 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
      2017-11-28 15:41:50,742 INFO  [pool-1-thread-1] server.AuthenticationFilter (AuthenticationFilter.java:constructSecretProvider(296)) - Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
      2017-11-28 15:41:50,752 WARN  [pool-1-thread-1] http.HttpRequestLog (HttpRequestLog.java:getRequestLog(100)) - Jetty request log can only be enabled using Log4j
      2017-11-28 15:41:50,763 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addGlobalFilter(791)) - Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
      2017-11-28 15:41:50,767 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(766)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context hdfs
      2017-11-28 15:41:50,767 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
      2017-11-28 15:41:50,771 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context logs
      2017-11-28 15:41:50,771 INFO  [pool-1-thread-1] security.HttpCrossOriginFilterInitializer (HttpCrossOriginFilterInitializer.java:initFilter(49)) - CORS filter not enabled. Please set hadoop.http.cross-origin.enabled to 'true' to enable it
      2017-11-28 15:41:50,965 INFO  [pool-1-thread-1] http.HttpServer2 (NameNodeHttpServer.java:initWebHdfs(93)) - Added filter 'org.apache.hadoop.hdfs.web.AuthFilter' (class=org.apache.hadoop.hdfs.web.AuthFilter)
      2017-11-28 15:41:50,969 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addJerseyResourcePackage(690)) - addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
      2017-11-28 15:41:50,997 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:bindListener(989)) - Jetty bound to port 46377
      2017-11-28 15:41:50,998 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - jetty-6.1.14
      2017-11-28 15:41:51,039 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Extract jar:file:/grid/0/nobody/.m2/repository/org/apache/hadoop/hadoop-hdfs/2.7.3.2.6.4.0-SNAPSHOT/hadoop-hdfs-2.7.3.2.6.4.0-SNAPSHOT-tests.jar!/webapps/hdfs to /tmp/Jetty_localhost_46377_hdfs____kc669n/webapp
      2017-11-28 15:41:51,282 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:46377
      2017-11-28 15:41:51,288 INFO  [pool-1-thread-1] namenode.FSEditLog (FSEditLog.java:newInstance(225)) - Edit logging is async:false
      2017-11-28 15:41:51,289 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(758)) - No KeyProvider found.
      2017-11-28 15:41:51,289 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(768)) - fsLock is fair:true
      2017-11-28 15:41:51,289 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:51,290 INFO  [pool-1-thread-1] blockmanagement.DatanodeManager (DatanodeManager.java:<init>(274)) - dfs.block.invalidate.limit=1000
      2017-11-28 15:41:51,290 INFO  [pool-1-thread-1] blockmanagement.DatanodeManager (DatanodeManager.java:<init>(280)) - dfs.namenode.datanode.registration.ip-hostname-check=true
      2017-11-28 15:41:51,290 INFO  [pool-1-thread-1] blockmanagement.BlockManager (InvalidateBlocks.java:printBlockDeletionTime(71)) - dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
      2017-11-28 15:41:51,291 INFO  [pool-1-thread-1] blockmanagement.BlockManager (InvalidateBlocks.java:printBlockDeletionTime(76)) - The block deletion will start around 2017 Nov 28 15:41:51
      2017-11-28 15:41:51,291 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map BlocksMap
      2017-11-28 15:41:51,291 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:51,291 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 2.0% max memory 910.5 MB = 18.2 MB
      2017-11-28 15:41:51,292 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^21 = 2097152 entries
      2017-11-28 15:41:51,295 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:createBlockTokenSecretManager(409)) - dfs.block.access.token.enable=false
      2017-11-28 15:41:51,295 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(395)) - defaultReplication         = 2
      2017-11-28 15:41:51,295 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(396)) - maxReplication             = 512
      2017-11-28 15:41:51,295 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(397)) - minReplication             = 1
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(398)) - maxReplicationStreams      = 2
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(399)) - replicationRecheckInterval = 3000
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(400)) - encryptDataTransfer        = false
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] blockmanagement.BlockManager (BlockManager.java:<init>(401)) - maxNumBlocksToLog          = 1000
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(789)) - fsOwner             = nobody (auth:SIMPLE)
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(790)) - supergroup          = supergroup
      2017-11-28 15:41:51,296 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(791)) - isPermissionEnabled = true
      2017-11-28 15:41:51,297 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(802)) - HA Enabled: false
      2017-11-28 15:41:51,297 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(839)) - Append Enabled: true
      2017-11-28 15:41:51,297 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map INodeMap
      2017-11-28 15:41:51,297 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:51,298 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 1.0% max memory 910.5 MB = 9.1 MB
      2017-11-28 15:41:51,298 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^20 = 1048576 entries
      2017-11-28 15:41:51,299 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(256)) - ACLs enabled? false
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(260)) - XAttrs enabled? true
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:<init>(268)) - Maximum size of an xattr: 16384
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] namenode.NameNode (FSDirectory.java:<init>(321)) - Caching file names occuring more than 10 times
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map cachedBlocks
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:51,300 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 0.25% max memory 910.5 MB = 2.3 MB
      2017-11-28 15:41:51,301 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^18 = 262144 entries
      2017-11-28 15:41:51,301 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5574)) - dfs.namenode.safemode.threshold-pct = 0.9990000128746033
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5575)) - dfs.namenode.safemode.min.datanodes = 0
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:<init>(5576)) - dfs.namenode.safemode.extension     = 0
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(76)) - NNTop conf: dfs.namenode.top.window.num.buckets = 10
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(78)) - NNTop conf: dfs.namenode.top.num.users = 10
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] metrics.TopMetrics (TopMetrics.java:logConf(80)) - NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
      2017-11-28 15:41:51,302 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:initRetryCache(967)) - Retry cache on namenode is enabled
      2017-11-28 15:41:51,303 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:initRetryCache(975)) - Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
      2017-11-28 15:41:51,303 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(395)) - Computing capacity for map NameNodeRetryCache
      2017-11-28 15:41:51,303 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(396)) - VM type       = 64-bit
      2017-11-28 15:41:51,303 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(397)) - 0.029999999329447746% max memory 910.5 MB = 279.7 KB
      2017-11-28 15:41:51,303 INFO  [pool-1-thread-1] util.GSet (LightWeightGSet.java:computeCapacity(402)) - capacity      = 2^15 = 32768 entries
      2017-11-28 15:41:51,308 INFO  [pool-1-thread-1] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:51,311 INFO  [pool-1-thread-1] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:51,314 INFO  [pool-1-thread-1] namenode.FileJournalManager (FileJournalManager.java:recoverUnfinalizedSegments(388)) - Recovering unfinalized segments in /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/current
      2017-11-28 15:41:51,314 INFO  [pool-1-thread-1] namenode.FileJournalManager (FileJournalManager.java:recoverUnfinalizedSegments(388)) - Recovering unfinalized segments in /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name2/current
      2017-11-28 15:41:51,314 INFO  [pool-1-thread-1] namenode.FSImage (FSImage.java:loadFSImage(664)) - No edit log streams selected.
      2017-11-28 15:41:51,314 INFO  [pool-1-thread-1] namenode.FSImage (FSImage.java:loadFSImageFile(731)) - Planning to load image: FSImageFile(file=/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
      2017-11-28 15:41:51,331 INFO  [pool-1-thread-1] namenode.FSImageFormatPBINode (FSImageFormatPBINode.java:loadINodeSection(257)) - Loading 1 INodes.
      2017-11-28 15:41:51,340 INFO  [pool-1-thread-1] namenode.FSImageFormatProtobuf (FSImageFormatProtobuf.java:load(184)) - Loaded FSImage in 0 seconds.
      2017-11-28 15:41:51,340 INFO  [pool-1-thread-1] namenode.FSImage (FSImage.java:loadFSImage(897)) - Loaded image for txid 0 from /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/name1/current/fsimage_0000000000000000000
      2017-11-28 15:41:51,345 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:loadFSImage(1079)) - Need to save fs image? false (staleImage=false, haEnabled=false, isRollingUpgrade=false)
      2017-11-28 15:41:51,345 INFO  [pool-1-thread-1] namenode.FSEditLog (FSEditLog.java:startLogSegment(1294)) - Starting log segment at 1
      2017-11-28 15:41:51,372 INFO  [pool-1-thread-1] namenode.NameCache (NameCache.java:initialized(143)) - initialized with 0 entries 0 lookups
      2017-11-28 15:41:51,372 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:loadFromDisk(730)) - Finished loading FSImage in 68 msecs
      2017-11-28 15:41:51,638 INFO  [pool-1-thread-1] namenode.NameNode (NameNodeRpcServer.java:<init>(428)) - RPC server is binding to localhost:0
      2017-11-28 15:41:51,649 INFO  [pool-1-thread-1] ipc.CallQueueManager (CallQueueManager.java:<init>(75)) - Using callQueue: class java.util.concurrent.LinkedBlockingQueue scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler
      2017-11-28 15:41:51,671 INFO  [Socket Reader #1 for port 38503] ipc.Server (Server.java:run(821)) - Starting Socket Reader #1 for port 38503
      2017-11-28 15:41:51,772 INFO  [pool-1-thread-1] namenode.NameNode (NameNode.java:initialize(772)) - Clients are to use localhost:38503 to access this namenode/service.
      2017-11-28 15:41:51,774 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:registerMBean(6509)) - Registered FSNamesystemState MBean
      2017-11-28 15:41:51,794 INFO  [pool-1-thread-1] namenode.LeaseManager (LeaseManager.java:getNumUnderConstructionBlocks(139)) - Number of blocks under construction: 0
      2017-11-28 15:41:51,795 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:initializeReplQueues(1271)) - initializing replication queues
      2017-11-28 15:41:51,795 INFO  [pool-1-thread-1] hdfs.StateChange (FSNamesystem.java:leave(5661)) - STATE* Leaving safe mode after 0 secs
      2017-11-28 15:41:51,795 INFO  [pool-1-thread-1] hdfs.StateChange (FSNamesystem.java:leave(5673)) - STATE* Network topology has 0 racks and 0 datanodes
      2017-11-28 15:41:51,795 INFO  [pool-1-thread-1] hdfs.StateChange (FSNamesystem.java:leave(5676)) - STATE* UnderReplicatedBlocks has 0 blocks
      2017-11-28 15:41:51,806 INFO  [pool-1-thread-1] blockmanagement.DatanodeDescriptor (DatanodeDescriptor.java:updateHeartbeatState(401)) - Number of failed storage changes from 0 to 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] blockmanagement.BlockManager (BlockManager.java:processMisReplicatesAsync(2936)) - Total number of blocks            = 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] blockmanagement.BlockManager (BlockManager.java:processMisReplicatesAsync(2937)) - Number of invalid blocks          = 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] blockmanagement.BlockManager (BlockManager.java:processMisReplicatesAsync(2938)) - Number of under-replicated blocks = 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] blockmanagement.BlockManager (BlockManager.java:processMisReplicatesAsync(2939)) - Number of  over-replicated blocks = 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] blockmanagement.BlockManager (BlockManager.java:processMisReplicatesAsync(2941)) - Number of blocks being written    = 0
      2017-11-28 15:41:51,809 INFO  [Replication Queue Initializer] hdfs.StateChange (BlockManager.java:processMisReplicatesAsync(2943)) - STATE* Replication Queue initialization scan for invalid, over- and under-replicated blocks completed in 15 msec
      2017-11-28 15:41:51,810 INFO  [org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$ReplicationMonitor@23383605] BlockStateChange (BlockManager.java:computeReplicationWorkForBlocks(1653)) - BLOCK* neededReplications = 0, pendingReplications = 0.
      2017-11-28 15:41:51,846 INFO  [IPC Server listener on 38503] ipc.Server (Server.java:run(900)) - IPC Server listener on 38503: starting
      2017-11-28 15:41:51,846 INFO  [IPC Server Responder] ipc.Server (Server.java:run(1064)) - IPC Server Responder: starting
      2017-11-28 15:41:51,850 INFO  [pool-1-thread-1] namenode.NameNode (NameNode.java:startCommonServices(885)) - NameNode RPC up at: localhost/127.0.0.1:38503
      2017-11-28 15:41:51,850 WARN  [pool-1-thread-1] namenode.NameNode (NameNode.java:makeMetricsLoggerAsync(828)) - Metrics logging will not be async since the logger is not log4j
      2017-11-28 15:41:51,851 INFO  [pool-1-thread-1] namenode.FSNamesystem (FSNamesystem.java:startActiveServices(1187)) - Starting services required for active state
      2017-11-28 15:41:51,851 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:updateCountForQuota(708)) - Initializing quota with 4 thread(s)
      2017-11-28 15:41:51,855 INFO  [pool-1-thread-1] namenode.FSDirectory (FSDirectory.java:updateCountForQuota(717)) - Quota initialization completed in 3 milliseconds
      name space=1
      storage space=0
      storage types=RAM_DISK=0, SSD=0, DISK=0, ARCHIVE=0
      2017-11-28 15:41:51,859 INFO  [CacheReplicationMonitor(503514335)] blockmanagement.CacheReplicationMonitor (CacheReplicationMonitor.java:run(161)) - Starting CacheReplicationMonitor with interval 30000 milliseconds
      2017-11-28 15:41:51,865 INFO  [pool-1-thread-1] hdfs.MiniDFSCluster (MiniDFSCluster.java:startDataNodes(1458)) - Starting DataNode 0 with dfs.datanode.data.dir: [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1,[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2
      2017-11-28 15:41:52,185 INFO  [pool-1-thread-1] checker.ThrottledAsyncChecker (ThrottledAsyncChecker.java:schedule(122)) - Scheduling a check for [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/
      2017-11-28 15:41:52,208 INFO  [pool-1-thread-1] checker.ThrottledAsyncChecker (ThrottledAsyncChecker.java:schedule(122)) - Scheduling a check for [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/
      2017-11-28 15:41:52,269 INFO  [pool-1-thread-1] impl.MetricsSystemImpl (MetricsSystemImpl.java:init(159)) - DataNode metrics system started (again)
      2017-11-28 15:41:52,273 INFO  [pool-1-thread-1] datanode.BlockScanner (BlockScanner.java:<init>(180)) - Initialized block scanner with targetBytesPerSec 1048576
      2017-11-28 15:41:52,277 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:52,281 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:<init>(455)) - Configured hostname is 127.0.0.1
      2017-11-28 15:41:52,282 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:52,282 WARN  [pool-1-thread-1] conf.Configuration (Configuration.java:getTimeDurationHelper(1592)) - No unit for dfs.datanode.outliers.report.interval(1800000) assuming MILLISECONDS
      2017-11-28 15:41:52,287 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1251)) - Starting DataNode with maxLockedMemory = 0
      2017-11-28 15:41:52,299 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:initDataXceiver(1028)) - Opened streaming server at /127.0.0.1:37352
      2017-11-28 15:41:52,303 INFO  [pool-1-thread-1] datanode.DataNode (DataXceiverServer.java:<init>(78)) - Balancing bandwith is 1048576 bytes/s
      2017-11-28 15:41:52,304 INFO  [pool-1-thread-1] datanode.DataNode (DataXceiverServer.java:<init>(79)) - Number threads for balancing is 5
      2017-11-28 15:41:52,316 INFO  [pool-1-thread-1] server.AuthenticationFilter (AuthenticationFilter.java:constructSecretProvider(296)) - Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
      2017-11-28 15:41:52,317 WARN  [pool-1-thread-1] http.HttpRequestLog (HttpRequestLog.java:getRequestLog(100)) - Jetty request log can only be enabled using Log4j
      2017-11-28 15:41:52,318 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addGlobalFilter(791)) - Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
      2017-11-28 15:41:52,319 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(766)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context datanode
      2017-11-28 15:41:52,319 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context logs
      2017-11-28 15:41:52,319 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
      2017-11-28 15:41:52,319 INFO  [pool-1-thread-1] security.HttpCrossOriginFilterInitializer (HttpCrossOriginFilterInitializer.java:initFilter(49)) - CORS filter not enabled. Please set hadoop.http.cross-origin.enabled to 'true' to enable it
      2017-11-28 15:41:52,326 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:bindListener(989)) - Jetty bound to port 36342
      2017-11-28 15:41:52,326 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - jetty-6.1.14
      2017-11-28 15:41:52,331 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Extract jar:file:/grid/0/nobody/.m2/repository/org/apache/hadoop/hadoop-hdfs/2.7.3.2.6.4.0-SNAPSHOT/hadoop-hdfs-2.7.3.2.6.4.0-SNAPSHOT-tests.jar!/webapps/datanode to /tmp/Jetty_localhost_36342_datanode____.s94vhn/webapp
      2017-11-28 15:41:52,465 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:36342
      2017-11-28 15:41:52,611 INFO  [pool-1-thread-1] web.DatanodeHttpServer (DatanodeHttpServer.java:start(233)) - Listening HTTP traffic on /127.0.0.1:38743
      2017-11-28 15:41:52,611 INFO  [org.apache.hadoop.util.JvmPauseMonitor$Monitor@2f3cec05] util.JvmPauseMonitor (JvmPauseMonitor.java:run(179)) - Starting JVM pause monitor
      2017-11-28 15:41:52,613 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1277)) - dnUserName = nobody
      2017-11-28 15:41:52,613 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1278)) - supergroup = supergroup
      2017-11-28 15:41:52,632 INFO  [pool-1-thread-1] ipc.CallQueueManager (CallQueueManager.java:<init>(75)) - Using callQueue: class java.util.concurrent.LinkedBlockingQueue scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler
      2017-11-28 15:41:52,633 INFO  [Socket Reader #1 for port 41421] ipc.Server (Server.java:run(821)) - Starting Socket Reader #1 for port 41421
      2017-11-28 15:41:52,647 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:initIpcServer(941)) - Opened IPC server at /127.0.0.1:41421
      2017-11-28 15:41:52,660 INFO  [pool-1-thread-1] datanode.DataNode (BlockPoolManager.java:refreshNamenodes(152)) - Refresh request received for nameservices: null
      2017-11-28 15:41:52,663 INFO  [pool-1-thread-1] datanode.DataNode (BlockPoolManager.java:doRefreshNamenodes(201)) - Starting BPOfferServices for nameservices: <default>
      2017-11-28 15:41:52,676 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] datanode.DataNode (BPServiceActor.java:run(761)) - Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:38503 starting to offer service
      2017-11-28 15:41:52,683 INFO  [IPC Server Responder] ipc.Server (Server.java:run(1064)) - IPC Server Responder: starting
      2017-11-28 15:41:52,683 INFO  [IPC Server listener on 41421] ipc.Server (Server.java:run(900)) - IPC Server listener on 41421: starting
      2017-11-28 15:41:52,687 INFO  [pool-1-thread-1] hdfs.MiniDFSCluster (MiniDFSCluster.java:startDataNodes(1458)) - Starting DataNode 1 with dfs.datanode.data.dir: [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3,[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4
      2017-11-28 15:41:52,688 INFO  [pool-1-thread-1] checker.ThrottledAsyncChecker (ThrottledAsyncChecker.java:schedule(122)) - Scheduling a check for [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/
      2017-11-28 15:41:52,689 INFO  [pool-1-thread-1] checker.ThrottledAsyncChecker (ThrottledAsyncChecker.java:schedule(122)) - Scheduling a check for [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/
      2017-11-28 15:41:52,705 INFO  [pool-1-thread-1] impl.MetricsSystemImpl (MetricsSystemImpl.java:init(159)) - DataNode metrics system started (again)
      2017-11-28 15:41:52,705 INFO  [pool-1-thread-1] datanode.BlockScanner (BlockScanner.java:<init>(180)) - Initialized block scanner with targetBytesPerSec 1048576
      2017-11-28 15:41:52,705 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:52,706 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:<init>(455)) - Configured hostname is 127.0.0.1
      2017-11-28 15:41:52,706 INFO  [pool-1-thread-1] common.Util (Util.java:isDiskStatsEnabled(111)) - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
      2017-11-28 15:41:52,706 WARN  [pool-1-thread-1] conf.Configuration (Configuration.java:getTimeDurationHelper(1592)) - No unit for dfs.datanode.outliers.report.interval(1800000) assuming MILLISECONDS
      2017-11-28 15:41:52,706 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1251)) - Starting DataNode with maxLockedMemory = 0
      2017-11-28 15:41:52,707 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:initDataXceiver(1028)) - Opened streaming server at /127.0.0.1:41272
      2017-11-28 15:41:52,707 INFO  [pool-1-thread-1] datanode.DataNode (DataXceiverServer.java:<init>(78)) - Balancing bandwith is 1048576 bytes/s
      2017-11-28 15:41:52,707 INFO  [pool-1-thread-1] datanode.DataNode (DataXceiverServer.java:<init>(79)) - Number threads for balancing is 5
      2017-11-28 15:41:52,710 INFO  [pool-1-thread-1] server.AuthenticationFilter (AuthenticationFilter.java:constructSecretProvider(296)) - Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
      2017-11-28 15:41:52,711 WARN  [pool-1-thread-1] http.HttpRequestLog (HttpRequestLog.java:getRequestLog(100)) - Jetty request log can only be enabled using Log4j
      2017-11-28 15:41:52,712 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addGlobalFilter(791)) - Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
      2017-11-28 15:41:52,713 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(766)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context datanode
      2017-11-28 15:41:52,713 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
      2017-11-28 15:41:52,713 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:addFilter(774)) - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context logs
      2017-11-28 15:41:52,713 INFO  [pool-1-thread-1] security.HttpCrossOriginFilterInitializer (HttpCrossOriginFilterInitializer.java:initFilter(49)) - CORS filter not enabled. Please set hadoop.http.cross-origin.enabled to 'true' to enable it
      2017-11-28 15:41:52,714 INFO  [pool-1-thread-1] http.HttpServer2 (HttpServer2.java:bindListener(989)) - Jetty bound to port 44682
      2017-11-28 15:41:52,714 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - jetty-6.1.14
      2017-11-28 15:41:52,719 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Extract jar:file:/grid/0/nobody/.m2/repository/org/apache/hadoop/hadoop-hdfs/2.7.3.2.6.4.0-SNAPSHOT/hadoop-hdfs-2.7.3.2.6.4.0-SNAPSHOT-tests.jar!/webapps/datanode to /tmp/Jetty_localhost_44682_datanode____.inx2i1/webapp
      2017-11-28 15:41:52,856 INFO  [pool-1-thread-1] mortbay.log (Slf4jLog.java:info(67)) - Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:44682
      2017-11-28 15:41:52,871 INFO  [pool-1-thread-1] web.DatanodeHttpServer (DatanodeHttpServer.java:start(233)) - Listening HTTP traffic on /127.0.0.1:45542
      2017-11-28 15:41:52,872 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1277)) - dnUserName = nobody
      2017-11-28 15:41:52,872 INFO  [org.apache.hadoop.util.JvmPauseMonitor$Monitor@376961fa] util.JvmPauseMonitor (JvmPauseMonitor.java:run(179)) - Starting JVM pause monitor
      2017-11-28 15:41:52,872 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:startDataNode(1278)) - supergroup = supergroup
      2017-11-28 15:41:52,872 INFO  [pool-1-thread-1] ipc.CallQueueManager (CallQueueManager.java:<init>(75)) - Using callQueue: class java.util.concurrent.LinkedBlockingQueue scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler
      2017-11-28 15:41:52,873 INFO  [Socket Reader #1 for port 46351] ipc.Server (Server.java:run(821)) - Starting Socket Reader #1 for port 46351
      2017-11-28 15:41:52,879 INFO  [pool-1-thread-1] datanode.DataNode (DataNode.java:initIpcServer(941)) - Opened IPC server at /127.0.0.1:46351
      2017-11-28 15:41:52,882 INFO  [pool-1-thread-1] datanode.DataNode (BlockPoolManager.java:refreshNamenodes(152)) - Refresh request received for nameservices: null
      2017-11-28 15:41:52,883 INFO  [pool-1-thread-1] datanode.DataNode (BlockPoolManager.java:doRefreshNamenodes(201)) - Starting BPOfferServices for nameservices: <default>
      2017-11-28 15:41:52,883 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] datanode.DataNode (BPServiceActor.java:run(761)) - Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:38503 starting to offer service
      2017-11-28 15:41:52,884 INFO  [IPC Server listener on 46351] ipc.Server (Server.java:run(900)) - IPC Server listener on 46351: starting
      2017-11-28 15:41:52,885 INFO  [IPC Server Responder] ipc.Server (Server.java:run(1064)) - IPC Server Responder: starting
      2017-11-28 15:41:53,105 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:getParallelVolumeLoadThreadsNum(384)) - Using 2 threads to upgrade data directories (dfs.datanode.parallel.volumes.load.threads.num=2, dataDirs=2)
      2017-11-28 15:41:53,105 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:getParallelVolumeLoadThreadsNum(384)) - Using 2 threads to upgrade data directories (dfs.datanode.parallel.volumes.load.threads.num=2, dataDirs=2)
      2017-11-28 15:41:53,107 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:53,107 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:53,108 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:loadStorageDirectory(309)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1 is not formatted for namespace 1907606034. Formatting...
      2017-11-28 15:41:53,108 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:loadStorageDirectory(309)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3 is not formatted for namespace 1907606034. Formatting...
      2017-11-28 15:41:53,110 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:createStorageID(187)) - Generated new storageID DS-f0a27a63-d51e-48b5-aef1-2782ed1194d8 for directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1
      2017-11-28 15:41:53,110 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:createStorageID(187)) - Generated new storageID DS-207ff42e-e232-4cb0-91e1-6c983d9187b2 for directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3
      2017-11-28 15:41:53,114 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:53,114 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (Storage.java:tryLock(776)) - Lock on /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/in_use.lock acquired by nodename 159376@ctr-e134-1499953498516-345856-01-000007.hwx.site
      2017-11-28 15:41:53,115 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:loadStorageDirectory(309)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2 is not formatted for namespace 1907606034. Formatting...
      2017-11-28 15:41:53,116 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data3/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:loadStorageDirectory(309)) - Storage directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data4 is not formatted for namespace 1907606034. Formatting...
      2017-11-28 15:41:53,116 INFO  [DataNode: [[[DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data1/, [DISK]file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/build/test/data/dfs/data/data2/]]  heartbeating to localhost/127.0.0.1:38503] common.Storage (DataStorage.java:createStorageID(187)) - Generated new storageID DS-e9f121ff-b694-4e58-abaa-fdf4ac80b386 for directory /grid/0/nobody/workspace/build-support/SOURCES/oozie/
      ...[truncated 5183895 chars]...
      pAttemptImpl:809 - appattempt_1511883728609_0005_000001 State change from LAUNCHED to RUNNING
      16:04:00,539  INFO RMAppImpl:779 - application_1511883728609_0005 State change from ACCEPTED to RUNNING
      16:04:00,578  INFO FSEditLog:771 - Number of transactions: 7405 Total time for transactions(ms): 205 Number of transactions batched in Syncs: 2919 Number of syncs: 4486 SyncTimes(ms): 235 79 
      16:04:00,579  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:00,659  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:00,808  INFO StateChange:3862 - BLOCK* allocate blk_1073743269_2445, replicas=127.0.0.1:41272, 127.0.0.1:37352 for /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml
      16:04:00,894  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445 src: /127.0.0.1:44658 dest: /127.0.0.1:41272
      16:04:00,895  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445 src: /127.0.0.1:51170 dest: /127.0.0.1:37352
      16:04:00,938  INFO clienttrace:1490 - src: /127.0.0.1:51170, dest: /127.0.0.1:37352, bytes: 141926, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445, duration: 41616333
      16:04:00,938  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445, type=LAST_IN_PIPELINE terminating
      16:04:00,939  INFO clienttrace:1490 - src: /127.0.0.1:44658, dest: /127.0.0.1:41272, bytes: 141926, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445, duration: 42980160
      16:04:00,939  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743269_2445, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37352] terminating
      16:04:00,944  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:01,039  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:02,077  INFO RMContainerImpl:422 - container_1511883728609_0005_01_000002 Container Transitioned from NEW to ALLOCATED
      16:04:02,077  INFO RMAuditLogger:141 - USER=nobody	OPERATION=AM Allocated Container	TARGET=SchedulerApp	RESULT=SUCCESS	APPID=application_1511883728609_0005	CONTAINERID=container_1511883728609_0005_01_000002
      16:04:02,077  INFO SchedulerNode:152 - Assigned container container_1511883728609_0005_01_000002 of capacity <memory:1024, vCores:1> on host ctr-e134-1499953498516-345856-01-000007.hwx.site:43216, which has 1 containers, <memory:1024, vCores:1> used and <memory:3072, vCores:7> available after allocation
      16:04:02,077  INFO AbstractContainerAllocator:89 - assignedContainer application attempt=appattempt_1511883728609_0005_000001 container=container_1511883728609_0005_01_000002 queue=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.RegularContainerAllocator@7a8629b9 clusterResource=<memory:8192, vCores:16> type=OFF_SWITCH
      16:04:02,078  INFO ParentQueue:475 - assignedContainer queue=root usedCapacity=0.375 absoluteUsedCapacity=0.375 used=<memory:3072, vCores:2> cluster=<memory:8192, vCores:16>
      16:04:02,612  INFO NMTokenSecretManagerInRM:200 - Sending NMToken for nodeId : ctr-e134-1499953498516-345856-01-000007.hwx.site:43216 for container : container_1511883728609_0005_01_000002
      16:04:02,613  INFO RMContainerImpl:422 - container_1511883728609_0005_01_000002 Container Transitioned from ALLOCATED to ACQUIRED
      16:04:02,693  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,694  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,714  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/MRAppJar.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,716  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/hadoop-mapreduce-client-jobclient-2.7.3.2.6.4.0-SNAPSHOT-tests.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,718  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/other.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,719  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/myfunctions.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,720  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,722  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,723  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,762  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/MRAppJar.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,763  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/hadoop-mapreduce-client-jobclient-2.7.3.2.6.4.0-SNAPSHOT-tests.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,764  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/other.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,765  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/myfunctions.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,766  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,768  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,769  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,770  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/MRAppJar.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,771  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/hadoop-mapreduce-client-jobclient-2.7.3.2.6.4.0-SNAPSHOT-tests.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,772  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/MRAppJar.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,773  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/hadoop-mapreduce-client-jobclient-2.7.3.2.6.4.0-SNAPSHOT-tests.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,774  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/other.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,775  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/myfunctions.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,777  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,779  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,780  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,781  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/MRAppJar.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,782  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/nobody/target/MiniMRCluster_900807203-tmpDir/hadoop-mapreduce-client-jobclient-2.7.3.2.6.4.0-SNAPSHOT-tests.jar	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:02,901  INFO ContainerManagerImpl:810 - Start request for container_1511883728609_0005_01_000002 by user nobody
      16:04:02,903  INFO ContainerManagerImpl:850 - Creating a new application reference for app application_1511883728609_0005
      16:04:02,904  INFO NMAuditLogger:89 - USER=nobody	IP=172.27.58.198	OPERATION=Start Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1511883728609_0005	CONTAINERID=container_1511883728609_0005_01_000002
      16:04:02,904  INFO ApplicationImpl:464 - Application application_1511883728609_0005 transitioned from NEW to INITING
      16:04:02,904  INFO ApplicationImpl:304 - Adding container_1511883728609_0005_01_000002 to application application_1511883728609_0005
      16:04:02,904  INFO ApplicationImpl:464 - Application application_1511883728609_0005 transitioned from INITING to RUNNING
      16:04:02,905  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from NEW to LOCALIZING
      16:04:02,905  INFO AuxServices:215 - Got event CONTAINER_INIT for appId application_1511883728609_0005
      16:04:02,905  INFO AuxServices:215 - Got event APPLICATION_INIT for appId application_1511883728609_0005
      16:04:02,905  INFO AuxServices:219 - Got APPLICATION_INIT for service mapreduce_shuffle
      16:04:02,905  INFO ShuffleHandler:681 - Added token for job_1511883728609_0005
      16:04:02,905  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip transitioned from INIT to DOWNLOADING
      16:04:02,905  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py transitioned from INIT to DOWNLOADING
      16:04:02,905  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip transitioned from INIT to DOWNLOADING
      16:04:02,906  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml transitioned from INIT to DOWNLOADING
      16:04:02,906  INFO ResourceLocalizationService:784 - Downloading public rsrc:{ hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip, 1511885030140, FILE, null }
      16:04:02,989  INFO ResourceLocalizationService:784 - Downloading public rsrc:{ hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py, 1511885030145, FILE, null }
      16:04:03,017  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip	dst=null	perm=null	proto=rpc
      16:04:03,018  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib	dst=null	perm=null	proto=rpc
      16:04:03,019  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app	dst=null	perm=null	proto=rpc
      16:04:03,019  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30	dst=null	perm=null	proto=rpc
      16:04:03,020  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test	dst=null	perm=null	proto=rpc
      16:04:03,020  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user	dst=null	perm=null	proto=rpc
      16:04:03,021  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/	dst=null	perm=null	proto=rpc
      16:04:03,022  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip	dst=null	perm=null	proto=rpc
      16:04:03,072  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pyspark.zip(->/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_0/filecache/18/pyspark.zip) transitioned from DOWNLOADING to LOCALIZED
      16:04:03,073  INFO ResourceLocalizationService:784 - Downloading public rsrc:{ hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip, 1511885030130, FILE, null }
      16:04:03,076  INFO RMContainerImpl:422 - container_1511883728609_0005_01_000002 Container Transitioned from ACQUIRED to RUNNING
      16:04:03,101  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py	dst=null	perm=null	proto=rpc
      16:04:03,102  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py	dst=null	perm=null	proto=rpc
      16:04:03,149  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/pi.py(->/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_3/filecache/19/pi.py) transitioned from DOWNLOADING to LOCALIZED
      16:04:03,155  INFO ResourceLocalizationService:712 - Created localizer for container_1511883728609_0005_01_000002
      16:04:03,171  INFO ResourceLocalizationService:1194 - Writing credentials to the nmPrivate file /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/nmPrivate/container_1511883728609_0005_01_000002.tokens. Credentials list: 
      16:04:03,183  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip	dst=null	perm=null	proto=rpc
      16:04:03,184  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip	dst=null	perm=null	proto=rpc
      16:04:03,229  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/user/test/649db0c8-4d79-4881-9a59-c3dbdd439c30/app/lib/py4j-0.9-src.zip(->/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_3/filecache/20/py4j-0.9-src.zip) transitioned from DOWNLOADING to LOCALIZED
      16:04:03,248  INFO DefaultContainerExecutor:644 - Initializing user nobody
      16:04:03,299  INFO DefaultContainerExecutor:126 - Copying from /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/nmPrivate/container_1511883728609_0005_01_000002.tokens to /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002.tokens
      16:04:03,300  INFO DefaultContainerExecutor:133 - Localizer CWD set to /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005 = file:/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005
      16:04:03,384  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml	dst=null	perm=null	proto=rpc
      16:04:03,386  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml	dst=null	perm=null	proto=rpc
      16:04:03,424  INFO LocalizedResource:203 - Resource hdfs://localhost:38503/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.xml(->/grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005/filecache/10/job.xml) transitioned from DOWNLOADING to LOCALIZED
      16:04:03,424  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from LOCALIZING to LOCALIZED
      16:04:03,474  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from LOCALIZED to RUNNING
      16:04:03,474  INFO ContainersMonitorImpl:222 - Neither virutal-memory nor physical-memory monitoring is needed. Not running the monitor-thread
      16:04:03,570  INFO DefaultContainerExecutor:291 - launchContainer: [bash, /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002/default_container_executor.sh]
      16:04:03,635  INFO AppSchedulingInfo:220 - checking for deactivate of application :application_1511883728609_0005
      16:04:04,039  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:06,823  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job.split	dst=null	perm=null	proto=rpc	callerContext=mr_attempt_1511883728609_0005_m_000000_0
      16:04:07,001  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/0000003-171128160232232-oozie-nobo-W@spark-action	dst=null	perm=null	proto=rpc	callerContext=mr_attempt_1511883728609_0005_m_000000_0
      16:04:07,020  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/0000003-171128160232232-oozie-nobo-W@spark-action	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_attempt_1511883728609_0005_m_000000_0
      16:04:07,040  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:07,056  INFO StateChange:3862 - BLOCK* allocate blk_1073743270_2446, replicas=127.0.0.1:41272, 127.0.0.1:37352 for /user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/0000003-171128160232232-oozie-nobo-W@spark-action
      16:04:07,129  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446 src: /127.0.0.1:44760 dest: /127.0.0.1:41272
      16:04:07,131  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446 src: /127.0.0.1:51272 dest: /127.0.0.1:37352
      16:04:07,170  INFO clienttrace:1490 - src: /127.0.0.1:51272, dest: /127.0.0.1:37352, bytes: 22, op: HDFS_WRITE, cliID: DFSClient_attempt_1511883728609_0005_m_000000_0_1423302170_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446, duration: 38575862
      16:04:07,170  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446, type=LAST_IN_PIPELINE terminating
      16:04:07,171  INFO clienttrace:1490 - src: /127.0.0.1:44760, dest: /127.0.0.1:41272, bytes: 22, op: HDFS_WRITE, cliID: DFSClient_attempt_1511883728609_0005_m_000000_0_1423302170_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446, duration: 39157631
      16:04:07,171  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743270_2446, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37352] terminating
      16:04:07,174  INFO StateChange:3755 - DIR* completeFile: /user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/0000003-171128160232232-oozie-nobo-W@spark-action is closed by DFSClient_attempt_1511883728609_0005_m_000000_0_1423302170_1
      16:04:07,190  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/action.xml	dst=null	perm=null	proto=rpc	callerContext=mr_attempt_1511883728609_0005_m_000000_0
      16:04:07,192  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/action.xml	dst=null	perm=null	proto=rpc	callerContext=mr_attempt_1511883728609_0005_m_000000_0
      16:04:10,040  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:13,041  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:16,042  INFO BlockManager:1660 - Blocks chosen but could not be replicated = 1; of which 1 have no target, 0 have no source, 0 are UC, 0 are abandoned, 0 already have enough replicas.
      16:04:16,092  INFO ContainerManagerImpl:960 - Stopping container with container Id: container_1511883728609_0005_01_000002
      16:04:16,092  INFO NMAuditLogger:89 - USER=nobody	IP=172.27.58.198	OPERATION=Stop Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1511883728609_0005	CONTAINERID=container_1511883728609_0005_01_000002
      16:04:16,092  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from RUNNING to KILLING
      16:04:16,092  INFO ContainerLaunch:541 - Cleaning up container container_1511883728609_0005_01_000002
      16:04:16,111  WARN DefaultContainerExecutor:249 - Exit code from container container_1511883728609_0005_01_000002 is : 143
      16:04:16,115  INFO StateChange:3862 - BLOCK* allocate blk_1073743271_2447, replicas=127.0.0.1:37352, 127.0.0.1:41272 for /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist
      16:04:16,119  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/COMMIT_STARTED	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,120  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447 src: /127.0.0.1:51420 dest: /127.0.0.1:37352
      16:04:16,120  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/COMMIT_STARTED is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,121  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447 src: /127.0.0.1:44914 dest: /127.0.0.1:41272
      16:04:16,122  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/COMMIT_SUCCESS	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,124  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/COMMIT_SUCCESS is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,127  INFO StateChange:4299 - BLOCK* fsync: /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist for DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,137  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from KILLING to CONTAINER_CLEANEDUP_AFTER_KILL
      16:04:16,138  INFO DefaultContainerExecutor:492 - Deleting absolute path : /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_0/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002
      16:04:16,139  INFO DefaultContainerExecutor:492 - Deleting absolute path : /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_1/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002
      16:04:16,141  INFO DefaultContainerExecutor:492 - Deleting absolute path : /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_2/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002
      16:04:16,142  INFO DefaultContainerExecutor:492 - Deleting absolute path : /grid/0/nobody/workspace/build-support/SOURCES/oozie/sharelib/spark/target/MiniMRCluster_900807203/MiniMRCluster_900807203-localDir-nm-0_3/usercache/nobody/appcache/application_1511883728609_0005/container_1511883728609_0005_01_000002
      16:04:16,142  INFO NMAuditLogger:89 - USER=nobody	OPERATION=Container Finished - Killed	TARGET=ContainerImpl	RESULT=SUCCESS	APPID=application_1511883728609_0005	CONTAINERID=container_1511883728609_0005_01_000002
      16:04:16,144  INFO ContainerImpl:1163 - Container container_1511883728609_0005_01_000002 transitioned from CONTAINER_CLEANEDUP_AFTER_KILL to DONE
      16:04:16,144  INFO ApplicationImpl:347 - Removing container_1511883728609_0005_01_000002 from application application_1511883728609_0005
      16:04:16,144  INFO ContainersMonitorImpl:222 - Neither virutal-memory nor physical-memory monitoring is needed. Not running the monitor-thread
      16:04:16,145  INFO AuxServices:215 - Got event CONTAINER_STOP for appId application_1511883728609_0005
      16:04:16,145  INFO RMContainerImpl:422 - container_1511883728609_0005_01_000002 Container Transitioned from RUNNING to COMPLETED
      16:04:16,145  INFO RMAuditLogger:141 - USER=nobody	OPERATION=AM Released Container	TARGET=SchedulerApp	RESULT=SUCCESS	APPID=application_1511883728609_0005	CONTAINERID=container_1511883728609_0005_01_000002
      16:04:16,145  INFO SchedulerNode:220 - Released container container_1511883728609_0005_01_000002 of capacity <memory:1024, vCores:1> on host ctr-e134-1499953498516-345856-01-000007.hwx.site:43216, which currently has 0 containers, <memory:0, vCores:0> used and <memory:4096, vCores:8> available, release resources=true
      16:04:16,153  INFO clienttrace:1490 - src: /127.0.0.1:44914, dest: /127.0.0.1:41272, bytes: 21320, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447, duration: 30652455
      16:04:16,153  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447, type=LAST_IN_PIPELINE terminating
      16:04:16,155  INFO clienttrace:1490 - src: /127.0.0.1:51420, dest: /127.0.0.1:37352, bytes: 21320, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447, duration: 32389529
      16:04:16,155  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743271_2447, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41272] terminating
      16:04:16,156  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,158  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary_tmp	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,161  INFO StateChange:3862 - BLOCK* allocate blk_1073743272_2448, replicas=127.0.0.1:37352, 127.0.0.1:41272 for /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary_tmp
      16:04:16,163  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448 src: /127.0.0.1:51426 dest: /127.0.0.1:37352
      16:04:16,164  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448 src: /127.0.0.1:44920 dest: /127.0.0.1:41272
      16:04:16,167  INFO clienttrace:1490 - src: /127.0.0.1:44920, dest: /127.0.0.1:41272, bytes: 416, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448, duration: 2041105
      16:04:16,167  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448, type=LAST_IN_PIPELINE terminating
      16:04:16,167  INFO clienttrace:1490 - src: /127.0.0.1:51426, dest: /127.0.0.1:37352, bytes: 416, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448, duration: 2401053
      16:04:16,168  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743272_2448, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41272] terminating
      16:04:16,168  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary_tmp is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,174  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=setPermission	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary_tmp	dst=null	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,183  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,184  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,189  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,191  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,192  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1.jhist	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,194  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,199  INFO StateChange:3862 - BLOCK* allocate blk_1073743273_2449, replicas=127.0.0.1:41272, 127.0.0.1:37352 for /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp
      16:04:16,201  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449 src: /127.0.0.1:44924 dest: /127.0.0.1:41272
      16:04:16,202  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449 src: /127.0.0.1:51434 dest: /127.0.0.1:37352
      16:04:16,205  INFO clienttrace:1490 - src: /127.0.0.1:51434, dest: /127.0.0.1:37352, bytes: 21320, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449, duration: 2115381
      16:04:16,205  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449, type=LAST_IN_PIPELINE terminating
      16:04:16,205  INFO clienttrace:1490 - src: /127.0.0.1:44924, dest: /127.0.0.1:41272, bytes: 21320, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449, duration: 2464775
      16:04:16,205  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743273_2449, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37352] terminating
      16:04:16,206  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,207  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=setPermission	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp	dst=null	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,208  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,209  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,210  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,210  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,212  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005/job_1511883728609_0005_1_conf.xml	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,213  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=create	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp	dst=null	perm=nobody:supergroup:rw-rw-rw-	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,218  INFO StateChange:3862 - BLOCK* allocate blk_1073743274_2450, replicas=127.0.0.1:41272, 127.0.0.1:37352 for /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp
      16:04:16,219  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450 src: /127.0.0.1:44928 dest: /127.0.0.1:41272
      16:04:16,220  INFO DataNode:669 - Receiving BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450 src: /127.0.0.1:51438 dest: /127.0.0.1:37352
      16:04:16,224  INFO clienttrace:1490 - src: /127.0.0.1:51438, dest: /127.0.0.1:37352, bytes: 141926, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: c355628b-02f4-4102-9823-149c581be5bc, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450, duration: 4058119
      16:04:16,225  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450, type=LAST_IN_PIPELINE terminating
      16:04:16,225  INFO clienttrace:1490 - src: /127.0.0.1:44928, dest: /127.0.0.1:41272, bytes: 141926, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-1622970731_1, offset: 0, srvID: 47252ceb-afae-4640-acde-3a2b425713b6, blockid: BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450, duration: 4303796
      16:04:16,225  INFO DataNode:1463 - PacketResponder: BP-1089370620-172.27.58.198-1511883710117:blk_1073743274_2450, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37352] terminating
      16:04:16,226  INFO StateChange:3755 - DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp is closed by DFSClient_NONMAPREDUCE_-1622970731_1
      16:04:16,229  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=setPermission	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp	dst=null	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,234  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=rename	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary_tmp	dst=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,241  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=rename	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml_tmp	dst=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,243  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=rename	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist_tmp	dst=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist	perm=nobody:supergroup:rwxrwx---	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:16,258  INFO RMAppAttemptImpl:1209 - Updating application attempt appattempt_1511883728609_0005_000001 with final state: FINISHING, and exit status: -1000
      16:04:16,259  INFO RMAppAttemptImpl:809 - appattempt_1511883728609_0005_000001 State change from RUNNING to FINAL_SAVING
      16:04:16,259  INFO RMAppImpl:1124 - Updating application application_1511883728609_0005 with final state: FINISHING
      16:04:16,259  INFO RMAppImpl:779 - application_1511883728609_0005 State change from RUNNING to FINAL_SAVING
      16:04:16,259  INFO RMStateStore:228 - Updating info for app: application_1511883728609_0005
      16:04:16,259  INFO RMAppAttemptImpl:809 - appattempt_1511883728609_0005_000001 State change from FINAL_SAVING to FINISHING
      16:04:16,259  INFO RMAppImpl:779 - application_1511883728609_0005 State change from FINAL_SAVING to FINISHING
      16:04:17,263  INFO ApplicationMasterService:360 - application_1511883728609_0005 unregistered successfully. 
      16:04:17,271  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=delete	src=/tmp/hadoop-yarn/staging/nobody/.staging/job_1511883728609_0005	dst=null	perm=null	proto=rpc	callerContext=mr_appmaster_appattempt_1511883728609_0005_000001
      16:04:17,326  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=listStatus	src=/tmp/hadoop-yarn/staging/history/done_intermediate	dst=null	perm=null	proto=rpc
      16:04:17,328  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=listStatus	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody	dst=null	perm=null	proto=rpc
      16:04:17,330  INFO CompletedJob:95 - Loading job: job_1511883728609_0005 from file: hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist
      16:04:17,330  INFO CompletedJob:336 - Loading history file: [hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist]
      16:04:17,331  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist	dst=null	perm=null	proto=rpc
      16:04:17,343  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary	dst=null	perm=null	proto=rpc
      16:04:17,344  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=open	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary	dst=null	perm=null	proto=rpc
      16:04:17,346  INFO JobSummary:421 - jobId=job_1511883728609_0005,submitTime=1511885033094,launchTime=1511885040566,firstMapTaskLaunchTime=1511885042919,firstReduceTaskLaunchTime=0,finishTime=1511885056125,resourcesPerMap=1024,resourcesPerReduce=0,numMaps=1,numReduces=0,user=nobody,queue=default,status=SUCCEEDED,mapSlotSeconds=13,reduceSlotSeconds=0,jobName=oozie:launcher:T\=spark:W\=testApp:A\=spark-action:ID\=0000003-171128160232232-oozie-nobo-W
      16:04:17,346  INFO HistoryFileManager:422 - Deleting JobSummary file: [hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary]
      16:04:17,347  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=delete	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005.summary	dst=null	perm=null	proto=rpc
      16:04:17,349  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/tmp/hadoop-yarn/staging/history/done/2017/11/28/000000	dst=null	perm=null	proto=rpc
      16:04:17,350  INFO HistoryFileManager:1015 - Moving hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist to hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done/2017/11/28/000000/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist
      16:04:17,351  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=rename (options=[TO_TRASH])	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist	dst=/tmp/hadoop-yarn/staging/history/done/2017/11/28/000000/job_1511883728609_0005-1511885033094-nobody-oozie%3Alauncher%3AT%3Dspark%3AW%3DtestApp%3AA%3Ds-1511885056125-1-0-SUCCEEDED-default-1511885040566.jhist	perm=nobody:supergroup:rwxrwx---	proto=rpc
      16:04:17,351  INFO HistoryFileManager:1015 - Moving hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml to hdfs://localhost:38503/tmp/hadoop-yarn/staging/history/done/2017/11/28/000000/job_1511883728609_0005_conf.xml
      16:04:17,352  INFO audit:8930 - allowed=true	ugi=nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=rename (options=[TO_TRASH])	src=/tmp/hadoop-yarn/staging/history/done_intermediate/nobody/job_1511883728609_0005_conf.xml	dst=/tmp/hadoop-yarn/staging/history/done/2017/11/28/000000/job_1511883728609_0005_conf.xml	perm=nobody:supergroup:rwxrwx---	proto=rpc
      16:04:17,383  INFO audit:8930 - allowed=true	ugi=test (auth:PROXY) via nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark/action-data.seq	dst=null	perm=null	proto=rpc
      16:04:17,384  INFO audit:8930 - allowed=true	ugi=test (auth:PROXY) via nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=listStatus	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark	dst=null	perm=null	proto=rpc
      16:04:17,398  INFO audit:8930 - allowed=true	ugi=test (auth:PROXY) via nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=getfileinfo	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark	dst=null	perm=null	proto=rpc
      16:04:17,399  INFO audit:8930 - allowed=true	ugi=test (auth:PROXY) via nobody (auth:SIMPLE)	ip=/127.0.0.1	cmd=delete	src=/user/test/oozie-nobo/0000003-171128160232232-oozie-nobo-W/spark-action--spark	dst=null	perm=null	proto=rpc
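
(Note: the output above is only the MiniDFSCluster / MiniMRCluster environment that the Oozie test harness brings up and tears down; the failure itself happens earlier, during test setup. For anyone who wants to poke at the same kind of environment in isolation, here is a minimal, self-contained sketch using Hadoop's standard MiniDFSCluster test API. The class name and HDFS path are illustrative assumptions, not code from TestSparkMain; the two-datanode layout simply mirrors the data1..data4 storage directories formatted in the log above.)

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hdfs.MiniDFSCluster;

      public class MiniHdfsSketch {
          public static void main(String[] args) throws Exception {
              Configuration conf = new Configuration();
              // Two datanodes; by default each gets two storage dirs,
              // matching the data1..data4 volumes seen in the log above.
              MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
                      .numDataNodes(2)
                      .build();
              dfs.waitActive();
              FileSystem fs = dfs.getFileSystem();
              // Hypothetical path, standing in for the /user/test/.../app/lib dirs.
              fs.mkdirs(new Path("/user/test/app/lib"));
              dfs.shutdown();
          }
      }
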
      Standard Error
      
      Nov 28, 2017 3:42:06 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class
      Nov 28, 2017 3:42:06 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class
      Nov 28, 2017 3:42:06 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
      Nov 28, 2017 3:42:06 PM com.sun.jersey.server.impl.application.WebApplicationImpl _initiate
      INFO: Initiating Jersey application, version 'Jersey: 1.9 09/02/2011 11:17 AM'
      Nov 28, 2017 3:42:06 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:06 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:07 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest"
      Nov 28, 2017 3:42:08 PM com.google.inject.servlet.GuiceFilter setPipeline
      WARNING: Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.server.impl.application.WebApplicationImpl _initiate
      INFO: Initiating Jersey application, version 'Jersey: 1.9 09/02/2011 11:17 AM'
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:09 PM com.google.inject.servlet.GuiceFilter setPipeline
      WARNING: Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class
      Nov 28, 2017 3:42:09 PM com.sun.jersey.server.impl.application.WebApplicationImpl _initiate
      INFO: Initiating Jersey application, version 'Jersey: 1.9 09/02/2011 11:17 AM'
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:09 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:10 PM com.google.inject.servlet.GuiceFilter setPipeline
      WARNING: Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory register
      INFO: Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class
      Nov 28, 2017 3:42:10 PM com.sun.jersey.server.impl.application.WebApplicationImpl _initiate
      INFO: Initiating Jersey application, version 'Jersey: 1.9 09/02/2011 11:17 AM'
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
      Nov 28, 2017 3:42:10 PM com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory getComponentProvider
      INFO: Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton"
      MetaException(message:javax.jdo.JDOFatalUserException: There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      	at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:616)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:834)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:338)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:217)
      	at sun.reflect.GeneratedMethodAccessor24.invoke(Unknown Source)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1975)
      	at java.security.AccessController.doPrivileged(Native Method)
      	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1970)
      	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1177)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:814)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:702)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:495)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:524)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initializeHelper(ObjectStore.java:385)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:322)
      	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:283)
      	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:76)
      	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:136)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStoreForConf(HiveMetaStore.java:620)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMSForConf(HiveMetaStore.java:599)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:593)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:651)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:433)
      	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:105)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:91)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6396)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6391)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6658)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6606)
      	at org.apache.oozie.test.MiniHCatServer$1.run(MiniHCatServer.java:141)
      	at java.lang.Thread.run(Thread.java:745)
      NestedThrowablesStackTrace:
      There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      org.datanucleus.exceptions.NucleusUserException: There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      	at org.datanucleus.NucleusContextHelper.createStoreManagerForProperties(NucleusContextHelper.java:162)
      	at org.datanucleus.PersistenceNucleusContextImpl.initialise(PersistenceNucleusContextImpl.java:420)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:821)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:338)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:217)
      	at sun.reflect.GeneratedMethodAccessor24.invoke(Unknown Source)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1975)
      	at java.security.AccessController.doPrivileged(Native Method)
      	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1970)
      	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1177)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:814)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:702)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:495)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:524)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initializeHelper(ObjectStore.java:385)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:322)
      	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:283)
      	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:76)
      	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:136)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStoreForConf(HiveMetaStore.java:620)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMSForConf(HiveMetaStore.java:599)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:593)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:651)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:433)
      	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:105)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:91)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6396)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6391)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6658)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6606)
      	at org.apache.oozie.test.MiniHCatServer$1.run(MiniHCatServer.java:141)
      	at java.lang.Thread.run(Thread.java:745)
)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:82)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:91)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6396)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6391)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6658)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6606)
	at org.apache.oozie.test.MiniHCatServer$1.run(MiniHCatServer.java:141)
	at java.lang.Thread.run(Thread.java:745)
Caused by: MetaException(message:javax.jdo.JDOFatalUserException: There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      	at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:616)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:834)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:338)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:217)
      	at sun.reflect.GeneratedMethodAccessor24.invoke(Unknown Source)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1975)
      	at java.security.AccessController.doPrivileged(Native Method)
      	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1970)
      	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1177)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:814)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:702)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:495)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:524)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initializeHelper(ObjectStore.java:385)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:322)
      	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:283)
      	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:76)
      	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:136)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStoreForConf(HiveMetaStore.java:620)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMSForConf(HiveMetaStore.java:599)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:593)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:651)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:433)
      	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:105)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:91)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6396)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6391)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6658)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6606)
      	at org.apache.oozie.test.MiniHCatServer$1.run(MiniHCatServer.java:141)
      	at java.lang.Thread.run(Thread.java:745)
      NestedThrowablesStackTrace:
      There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      org.datanucleus.exceptions.NucleusUserException: There is no available StoreManager of type "rdbms". Make sure that you have put the relevant DataNucleus store plugin in your CLASSPATH and if defining a connection via JNDI or DataSource you also need to provide persistence property "datanucleus.storeManagerType"
      	at org.datanucleus.NucleusContextHelper.createStoreManagerForProperties(NucleusContextHelper.java:162)
      	at org.datanucleus.PersistenceNucleusContextImpl.initialise(PersistenceNucleusContextImpl.java:420)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:821)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:338)
      	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:217)
      	at sun.reflect.GeneratedMethodAccessor24.invoke(Unknown Source)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1975)
      	at java.security.AccessController.doPrivileged(Native Method)
      	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1970)
      	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1177)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:814)
      	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:702)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:495)
      	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:524)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initializeHelper(ObjectStore.java:385)
      	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:322)
      	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:283)
      	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:76)
      	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:136)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
      	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStoreForConf(HiveMetaStore.java:620)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMSForConf(HiveMetaStore.java:599)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:593)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:651)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:433)
      	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
      	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      	at java.lang.reflect.Method.invoke(Method.java:498)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:105)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
      	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:91)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6396)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6391)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6658)
      	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6606)
      	at org.apache.oozie.test.MiniHCatServer$1.run(MiniHCatServer.java:141)
      	at java.lang.Thread.run(Thread.java:745)
)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:212)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:105)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
	... 7 more
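
Note on the stderr above: the root cause is DataNucleus failing to locate its "rdbms" store plugin, which almost always means datanucleus-rdbms (together with datanucleus-core and datanucleus-api-jdo) is missing from the test classpath that MiniHCatServer runs with. As a hedged illustration only (the property names are taken from the exception message and from standard Hive metastore settings; the class name and Derby URL below are invented for the sketch, not Oozie's actual test code), the embedded metastore expects configuration along these lines before HiveMetaStore.startMetaStore() is reached:

    import org.apache.hadoop.hive.conf.HiveConf;

    // Sketch only: shows the JDO/DataNucleus settings the embedded
    // metastore resolves when it builds its PersistenceManagerFactory.
    public class MetastoreConfSketch {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf(); // extends Hadoop Configuration

            // Property named in the exception; per the message it is only
            // strictly required when the connection comes via JNDI/DataSource.
            conf.set("datanucleus.storeManagerType", "rdbms");

            // Illustrative embedded-Derby connection for a local metastore.
            conf.set("javax.jdo.option.ConnectionURL",
                    "jdbc:derby:;databaseName=target/test-metastore;create=true");
            conf.set("javax.jdo.option.ConnectionDriverName",
                    "org.apache.derby.jdbc.EmbeddedDriver");

            System.out.println(conf.get("datanucleus.storeManagerType"));
        }
    }

Even with these properties set, the JDOFatalUserException will recur unless the datanucleus-rdbms jar itself is on the classpath, so the test-scope dependency tree of the module running TestSparkMain is the first thing to verify.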

Attachments

Activity

People

  Assignee: Unassigned
  Reporter: Yesha Vora (yeshavora)
  Votes: 0
  Watchers: 1

Dates

  Created:
  Updated: