To generate sample data:
========================
for i in `seq 1 1000`; do echo $i,$i,$i; done > /tmp/test.txt

Create relevant directories in HDFS:
====================================
hadoop dfs -mkdir /tmp/test_table
hadoop dfs -rmr /tmp/test_table/test.txt
hadoop dfs -put /tmp/test.txt /tmp/test_table/

Hive test case to reproduce the issue:
======================================
DROP TABLE IF EXISTS test_table_text;
CREATE EXTERNAL TABLE test_table_text (f1 int, f2 int)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
STORED AS TEXTFILE
LOCATION '/tmp/test_table/';

DROP TABLE IF EXISTS test_table_orc1;
DROP TABLE IF EXISTS test_table_orc2;
DROP TABLE IF EXISTS test_table_orc3;
DROP TABLE IF EXISTS test_table_orc4;

CREATE TABLE test_table_orc1 (f1 int, f2 int) PARTITIONED BY (f3 string) STORED AS ORC;
CREATE TABLE test_table_orc2 (f1 int, f2 int) PARTITIONED BY (f3 string) STORED AS ORC;
CREATE TABLE test_table_orc3 (f1 int, f2 int) PARTITIONED BY (f3 string) STORED AS ORC;
CREATE TABLE test_table_orc4 (f1 int, f2 int) PARTITIONED BY (f3 string) STORED AS ORC;

set hive.enforce.bucketing=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.exec.max.dynamic.partitions.pernode=4096;
set hive.exec.max.dynamic.partitions=4096;

FROM test_table_text
INSERT OVERWRITE TABLE test_table_orc1 PARTITION (f3)
  SELECT f1, f2, f2 AS f3 WHERE f2 > 0 AND f2 < 10
INSERT OVERWRITE TABLE test_table_orc2 PARTITION (f3)
  SELECT f1, f2, f2 AS f3 WHERE f2 > 10 AND f2 < 20
INSERT OVERWRITE TABLE test_table_orc3 PARTITION (f3)
  SELECT f1, f2, f2 AS f3 WHERE f2 > 20 AND f2 < 30
INSERT OVERWRITE TABLE test_table_orc4 PARTITION (f3)
  SELECT f1, f2, f2 AS f3 WHERE f2 > 30;

Loading the partitions (especially the MoveTask at the end of the job) will take a very long time.
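
For scale: f2 runs from 1 to 1000, so the last INSERT alone creates 970 dynamic partitions (f3 = 31..1000) in test_table_orc4, and the MoveTask has to load every one of them. A quick way to confirm the fan-out after the job finishes (not part of the original repro; the filesystem path below assumes the default hive.metastore.warehouse.dir of /user/hive/warehouse):

SHOW PARTITIONS test_table_orc4;    -- expect 970 partitions

hadoop dfs -count /user/hive/warehouse/test_table_orc4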