Details
Type: Bug
Status: Open
Priority: Minor
Resolution: Unresolved
Affects Version/s: 2.2.0
Fix Version/s: None
Component/s: None
Environment: Spark 3.1.1
Description
Issue: ALTER TABLE DROP COLUMNS fails for all primitive datatypes except integer on a carbon table with Spark 3.1.1.
Steps:
CREATE TABLE uniqdata_alter(CUST_ID int, CUST_NAME string, ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint, BIGINT_COLUMN2 bigint, DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) stored as carbondata;
Scenario 1:
alter table uniqdata_alter drop columns(Double_COLUMN1);
Scenario 2:
alter table uniqdata_alter drop columns(cust_name);
Scenario 3:
alter table uniqdata_alter drop columns(DECIMAL_COLUMN2);
Scenario 4:
alter table uniqdata_alter drop columns(BIGINT_COLUMN2);
Scenario 5:
alter table uniqdata_alter drop columns(DOB);
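For contrast, and consistent with the issue summary (the failure affects all primitive datatypes except integer), dropping the int column is not expected to hit this error. A minimal check against the same table:
alter table uniqdata_alter drop columns(INTEGER_COLUMN1);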
Error message for Scenarios 1 through 5:
0: jdbc:hive2://10.21.19.14:23040/default> alter table uniqdata_alter drop columns(Double_COLUMN1);
Error: org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.carbondata.spark.exception.ProcessMetaDataException: operation failed for default.uniqdata_alter: Alter table drop column operation failed: org.apache.hadoop.hive.ql.metadata.HiveException: Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
col
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:361)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:263)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:263)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:258)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1746)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:272)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.carbondata.spark.exception.ProcessMetaDataException: operation failed for default.uniqdata_alter: Alter table drop column operation failed: org.apache.hadoop.hive.ql.metadata.HiveException: Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
col
at org.apache.spark.sql.execution.command.MetadataProcessOperation.throwMetadataException(package.scala:69)
at org.apache.spark.sql.execution.command.MetadataProcessOperation.throwMetadataException$(package.scala:68)
at org.apache.spark.sql.execution.command.MetadataCommand.throwMetadataException(package.scala:134)
at org.apache.spark.sql.execution.command.schema.CarbonAlterTableDropColumnCommand.processMetadata(CarbonAlterTableDropColumnCommand.scala:216)
at org.apache.spark.sql.execution.command.MetadataCommand.$anonfun$run$1(package.scala:137)
at org.apache.spark.sql.execution.command.Auditable.runWithAudit(package.scala:118)
at org.apache.spark.sql.execution.command.Auditable.runWithAudit$(package.scala:114)
at org.apache.spark.sql.execution.command.MetadataCommand.runWithAudit(package.scala:134)
at org.apache.spark.sql.execution.command.MetadataCommand.run(package.scala:137)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:228)
at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3687)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3685)
at org.apache.spark.sql.Dataset.<init>(Dataset.scala:228)
at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:99)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:96)
at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:615)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:610)
at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
... 16 more (state=,code=0)
Expected result:
The column should be dropped without error.
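A quick way to verify the expected behaviour once the drop succeeds (a sketch only, using the column names from the table above):
alter table uniqdata_alter drop columns(Double_COLUMN1);
describe uniqdata_alter;
-- Double_COLUMN1 should no longer appear in the describe output.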