Details

Type: Bug
Status: Closed
Priority: Major
Resolution: Fixed
Affects Version/s: 2.0.0
Fix Version/s: None
Environment: Spark 2.3.2
Description
A SELECT filter query fails when the filter combines secondary index (SI) columns that belong to different SI tables of the same main table.

Steps:
0: jdbc:hive2://10.20.255.171:23040/default> create table brinjal (imei string,AMSize string,channelsId string,ActiveCountry string, Activecity string,gamePointId double,deviceInformationId double,productionDate Timestamp,deliveryDate timestamp,deliverycharge double) stored as carbondata TBLPROPERTIES('inverted_index'='imei,AMSize,channelsId,ActiveCountry,Activecity,productionDate,deliveryDate','sort_columns'='imei,AMSize,channelsId,ActiveCountry,Activecity,productionDate,deliveryDate','table_blocksize'='1','SORT_SCOPE'='GLOBAL_SORT','carbon.column.compressor'='zstd');
+---------+
| Result  |
+---------+
+---------+
No rows selected (0.153 seconds)
0: jdbc:hive2://10.20.255.171:23040/default> LOAD DATA INPATH 'hdfs://hacluster/chetan/vardhandaterestruct.csv' INTO TABLE brinjal OPTIONS('DELIMITER'=',', 'QUOTECHAR'= '"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'= 'imei,deviceInformationId,AMSize,channelsId,ActiveCountry,Activecity,gamePointId,productionDate,deliveryDate,deliverycharge');
+---------+
| Result  |
+---------+
+---------+
No rows selected (2.357 seconds)
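As an optional sanity check (not part of the original session), a count query would confirm the CSV data landed in the main table:

0: jdbc:hive2://10.20.255.171:23040/default> select count(*) from brinjal;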
0: jdbc:hive2://10.20.255.171:23040/default> CREATE INDEX indextable1 ON TABLE brinjal (channelsId) AS 'carbondata' PROPERTIES('carbon.column.compressor'='zstd');
+---------+
| Result  |
+---------+
+---------+
No rows selected (1.048 seconds)
0: jdbc:hive2://10.20.255.171:23040/default> CREATE INDEX indextable2 ON TABLE brinjal (ActiveCountry) AS 'carbondata' PROPERTIES('carbon.column.compressor'='zstd');
+---------+
| Result  |
+---------+
+---------+
No rows selected (1.895 seconds)
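Both index tables are created at this point. As a hypothetical verification step (not part of the original session, and assuming the SHOW INDEXES syntax from the CarbonData secondary-index guide), the registered SI tables could be listed before running the failing query:

0: jdbc:hive2://10.20.255.171:23040/default> SHOW INDEXES ON TABLE brinjal;
-- expected to list indextable1 (on channelsId) and indextable2 (on ActiveCountry), both AS 'carbondata'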
0: jdbc:hive2://10.20.255.171:23040/default> select * from brinjal where ActiveCountry ='Chinese' or channelsId =4;
Error: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(positionReference#6440, 200)
+- *(6) HashAggregate(keys=[positionReference#6440], functions=[], output=[positionReference#6440])
   +- Union
      :- *(3) HashAggregate(keys=[positionReference#6440], functions=[], output=[positionReference#6440])
      :  +- Exchange hashpartitioning(positionReference#6440, 200)
      :     +- *(2) HashAggregate(keys=[positionReference#6440], functions=[], output=[positionReference#6440])
      :        +- *(2) Project [positionReference#6440]
      :           +- *(2) Filter (cast(channelsid#6439 as int) = 4)
      :              +- *(2) FileScan carbondata 2_0.indextable1[positionReference#6440,channelsid#6439] PushedFilters: [CastExpr((cast(channelsid#6439 as int) = 4))], ReadSchema: struct<channelsid:string,positionReference:string>
      +- *(5) HashAggregate(keys=[positionReference#6442], functions=[], output=[positionReference#6442])
         +- Exchange hashpartitioning(positionReference#6442, 200)
            +- *(4) HashAggregate(keys=[positionReference#6442], functions=[], output=[positionReference#6442])
               +- *(4) Project [positionReference#6442]
                  +- *(4) Filter (activecountry#6441 = Chinese)
                     +- *(4) FileScan carbondata 2_0.indextable2[positionReference#6442,activecountry#6441] PushedFilters: [EqualTo(activecountry,Chinese)], ReadSchema: struct<activecountry:string,positionReference:string> (state=,code=0)
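Note that the failure is specific to a filter combining SI columns from two different index tables; by implication, a filter served by a single SI table would be expected to succeed. Hypothetical isolation checks (not run in the original report):

select * from brinjal where channelsId = 4;             -- should be served by indextable1 alone
select * from brinjal where ActiveCountry = 'Chinese';  -- should be served by indextable2 alone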
Log:
org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder.addSegmentProperties(SegmentPropertiesAndSchemaHolder.java:117)
2020-06-01 12:19:28,058 | ERROR | [HiveServer2-Background-Pool: Thread-1150] | Error executing query, currentState RUNNING, | org.apache.spark.internal.Logging$class.logError(Logging.scala:91)
org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(positionReference#6440, 200)
(physical plan tree identical to the one shown above)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:119)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.InputAdapter.inputRDDs(WholeStageCodegenExec.scala:371)
    at org.apache.spark.sql.execution.aggregate.HashAggregateExec.inputRDDs(HashAggregateExec.scala:150)
    at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:605)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin.x$3$lzycompute(BroadCastSIFilterPushJoin.scala:119)
    at org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin.x$3(BroadCastSIFilterPushJoin.scala:91)
    at org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin.inputCopy$lzycompute(BroadCastSIFilterPushJoin.scala:91)
    at org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin.inputCopy(BroadCastSIFilterPushJoin.scala:91)
    at org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin.doExecute(BroadCastSIFilterPushJoin.scala:140)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan.getByteArrayRdd(SparkPlan.scala:247)
    at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:294)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3278)
    at org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2727)
    at org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2727)
    at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3259)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3258)
    at org.apache.spark.sql.Dataset.collect(Dataset.scala:2727)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:246)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:175)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:171)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:185)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(positionReference#6440, 200)
+- *(2) HashAggregate(keys=[positionReference#6440], functions=[], output=[positionReference#6440])
   +- *(2) Project [positionReference#6440]
      +- *(2) Filter (cast(channelsid#6439 as int) = 4)
         +- *(2) FileScan carbondata 2_0.indextable1[positionReference#6440,channelsid#6439] PushedFilters: [CastExpr((cast(channelsid#6439 as int) = 4))], ReadSchema: struct<channelsid:string,positionReference:string>
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:119)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.InputAdapter.inputRDDs(WholeStageCodegenExec.scala:371)
    at org.apache.spark.sql.execution.aggregate.HashAggregateExec.inputRDDs(HashAggregateExec.scala:150)
    at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:605)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.UnionExec$$anonfun$doExecute$1.apply(basicPhysicalOperators.scala:557)
    at org.apache.spark.sql.execution.UnionExec$$anonfun$doExecute$1.apply(basicPhysicalOperators.scala:557)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.immutable.List.foreach(List.scala:381)
    at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
    at scala.collection.immutable.List.map(List.scala:285)
    at org.apache.spark.sql.execution.UnionExec.doExecute(basicPhysicalOperators.scala:557)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.InputAdapter.inputRDDs(WholeStageCodegenExec.scala:371)
    at org.apache.spark.sql.execution.aggregate.HashAggregateExec.inputRDDs(HashAggregateExec.scala:150)
    at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:605)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.prepareShuffleDependency(ShuffleExchangeExec.scala:92)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:128)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:119)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
    ... 48 more
Caused by: java.lang.NullPointerException
    at org.apache.carbondata.core.index.IndexStoreManager.getAllCGAndFGIndexes(IndexStoreManager.java:99)
    at org.apache.carbondata.core.metadata.schema.table.CarbonTable.getAllVisibleIndexes(CarbonTable.java:1259)
    at org.apache.carbondata.core.index.IndexChooser.<init>(IndexChooser.java:70)
    at org.apache.carbondata.hadoop.api.CarbonInputFormat.getPrunedBlocklets(CarbonInputFormat.java:574)
    at org.apache.carbondata.hadoop.api.CarbonInputFormat.getDataBlocksOfSegment(CarbonInputFormat.java:493)
    at org.apache.carbondata.hadoop.api.CarbonTableInputFormat.getSplits(CarbonTableInputFormat.java:378)
    at org.apache.carbondata.hadoop.api.CarbonTableInputFormat.getSplits(CarbonTableInputFormat.java:205)
    at org.apache.carbondata.spark.rdd.CarbonScanRDD.internalGetPartitions(CarbonScanRDD.scala:144)
    at org.apache.carbondata.spark.rdd.CarbonRDD.getPartitions(CarbonRDD.scala:68)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:253)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:251)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:251)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:46)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:253)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:251)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:251)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:46)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:253)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:251)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:251)
    at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$.prepareShuffleDependency(ShuffleExchangeExec.scala:321)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.prepareShuffleDependency(ShuffleExchangeExec.scala:91)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:128)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec$$anonfun$doExecute$1.apply(ShuffleExchangeExec.scala:119)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
    ... 91 more
2020-06-01 12:19:28,063 | ERROR | [HiveServer2-Background-Pool: Thread-1150] | Error running hive query: | org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:179)
org.apache.hive.service.cli.HiveSQLException: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(positionReference#6440, 200)
(physical plan tree identical to the one shown above)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:269)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:175)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:171)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
    at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:185)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
2020-06-01 12:19:28,066 | INFO | [HiveServer2-Handler-Pool: Thread-1027] | Asked to cancel job group 24ed0ee3-4944-4369-8295-c469f9866796 | org.apache.spark.internal.Logging$class.logInfo(Logging.scala:54)
2020-06-01 12:19:28,082 | ERROR | [HiveServer2-Handler-Pool: Thread-1027] | Failed to close inode 320140 | org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:940)
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /user/sparkhive/warehouse/2_0.db/brinjal/LockFiles/droptable.lock (inode 320140): File does not exist. Holder DFSClient_NONMAPREDUCE_-602216185_1027 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3428)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3518)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3485)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:786)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:536)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2049)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2045)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2043)
    at org.apache.hadoop.ipc.Client.call(Client.java:1475)
    at org.apache.hadoop.ipc.Client.call(Client.java:1412)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
    at com.sun.proxy.$Proxy12.complete(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
    at sun.reflect.GeneratedMethodAccessor308.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at com.sun.proxy.$Proxy13.complete(Unknown Source)
    at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2291)
    at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2267)
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2232)
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:937)
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:969)
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
    at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2799)
    at org.apache.hadoop.fs.FileSystem.closeAllForUGI(FileSystem.java:460)
    at org.apache.hive.service.cli.session.HiveSessionImplwithUGI.close(HiveSessionImplwithUGI.java:112)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:78)
    at org.apache.hive.service.cli.session.HiveSessionProxy.access$000(HiveSessionProxy.java:36)
    at org.apache.hive.service.cli.session.HiveSessionProxy$1.run(HiveSessionProxy.java:63)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
    at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:59)
    at com.sun.proxy.$Proxy40.close(Unknown Source)
    at org.apache.hive.service.cli.session.SessionManager.closeSession(SessionManager.java:280)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLSessionManager.closeSession(SparkSQLSessionManager.scala:76)
    at org.apache.hive.service.cli.CLIService.closeSession(CLIService.java:237)
    at org.apache.hive.service.cli.thrift.ThriftCLIService.CloseSession(ThriftCLIService.java:397)
    at org.apache.hive.service.cli.thrift.TCLIService$Processor$CloseSession.getResult(TCLIService.java:1273)
    at org.apache.hive.service.cli.thrift.TCLIService$Processor$CloseSession.getResult(TCLIService.java:1258)
    at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
    at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
    at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:53)
    at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
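The root cause visible in the log is the NullPointerException in IndexStoreManager.getAllCGAndFGIndexes (IndexStoreManager.java:99), hit while CarbonData prunes blocklets for one of the SI table scans. Until the fix, a possible workaround (an untested sketch, not from the original report) is to rewrite the OR filter as a UNION so that each branch touches only one SI table:

select * from brinjal where ActiveCountry = 'Chinese'
union
select * from brinjal where channelsId = 4;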