diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java index a660747e6a..4469455017 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java @@ -581,7 +581,7 @@ private String getMatchedText(ASTNode n) { } /** * Here we take a Merge statement AST and generate a semantically equivalent multi-insert - * statement to exectue. Each Insert leg represents a single WHEN clause. As much as possible, + * statement to execute. Each Insert leg represents a single WHEN clause. As much as possible, * the new SQL statement is made to look like the input SQL statement so that it's easier to map * Query Compiler errors from generated SQL to original one this way. * The generated SQL is a complete representation of the original input for the same reason. diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java new file mode 100644 index 0000000000..c8e57a5deb --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java @@ -0,0 +1,130 @@ +package org.apache.hadoop.hive.ql; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.glassfish.jersey.message.internal.StringBuilderUtils; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.List; + +public class TestTxnExIm extends TxnCommandsBaseForTests { + static final private Logger LOG = LoggerFactory.getLogger(TestTxnExIm.class); + private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") + + File.separator + TestTxnExIm.class.getCanonicalName() + "-" + 
System.currentTimeMillis() + ).getPath().replaceAll("\\\\", "/"); + @Override + String getTestDataDir() { + return TEST_DATA_DIR; + } + @Override + public void setUp() throws Exception { + super.setUp(); + hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true"); + } + @Test + public void testExport() throws Exception { + int[][] rows1 = {{1,2},{3,4}}; + runStatementOnDriver("drop table if exists T"); + runStatementOnDriver("create table T (a int, b int) stored as ORC"); + runStatementOnDriver("insert into T(a,b) " + makeValuesClause(rows1)); +// "EXPORT TABLE tablename [PARTITION (part_column=\"value\"[, ...])]\n" + +// " TO 'export_target_path' [ FOR replication('eventid') ]"; + runStatementOnDriver("export table T to '" + getTestDataDir() + "/export'"); + runStatementOnDriver("select * from T"); + } + @Test + public void testExportPart() throws Exception { + hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + int[][] rows1 = {{1,2,1},{3,4,2}}; + runStatementOnDriver("drop table if exists T"); + runStatementOnDriver("create table T (a int, b int) partitioned by (p int) stored as ORC"); + runStatementOnDriver("insert into T partition(p)" + makeValuesClause(rows1)); + // "EXPORT TABLE tablename [PARTITION (part_column=\"value\"[, ...])]\n" + + // " TO 'export_target_path' [ FOR replication('eventid') ]"; + runStatementOnDriver("export table T partition(p=1) to '" + getTestDataDir() + "/export'"); + runStatementOnDriver("select * from T"); + } + + //create table like + @Test + public void testCTLT() throws Exception { + runStatementOnDriver("drop table if exists T"); + runStatementOnDriver("create table T like " + Table.ACIDTBL + " TBLPROPERTIES ('transactional'='true')"); +// runStatementOnDriver("create table T like " + Table.ACIDTBL); + List rs = runStatementOnDriver("show create table T"); + StringBuilder sb = new StringBuilder("*show create table"); + for (String r : rs) { + sb.append("\n").append(r); + } + 
LOG.error(sb.toString()); + } + @Test + public void testExportBuckets() throws Exception { + int[][] rows1 = {{1,2},{2,4}}; + runStatementOnDriver("insert into " + Table.ACIDTBL + makeValuesClause(rows1)); + runStatementOnDriver("export table " + Table.ACIDTBL + " to '" + getTestDataDir() + "/export'"); + int ti = 1; + /* + target/tmp/org.apache.hadoop.hive.ql.TestTxnCommands-1518808273004/ +├── export +│   ├── _metadata +│   └── data +│   └── delta_0000013_0000013_0000 +│   ├── bucket_00000 +│   └── bucket_00001 +└── warehouse + ├── acidtbl + │   └── delta_0000013_0000013_0000 + │   ├── bucket_00000 + │   └── bucket_00001 + */ + } + @Test + public void testExportPartPartial() throws Exception { + hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + int[][] rows1 = {{1,2,1,1},{3,4,2,2}}; + runStatementOnDriver("drop table if exists T"); + runStatementOnDriver("create table T (a int, b int) partitioned by (p int, q int) stored as ORC"); + runStatementOnDriver("insert into T partition(p,q)" + makeValuesClause(rows1)); + // "EXPORT TABLE tablename [PARTITION (part_column=\"value\"[, ...])]\n" + + // " TO 'export_target_path' [ FOR replication('eventid') ]"; + runStatementOnDriver("export table T partition(p=1) to '" + getTestDataDir() + "/export'"); + runStatementOnDriver("select * from T"); + /* + * target/tmp/org.apache.hadoop.hive.ql.TestTxnCommands-1518808733048/ +├── export +│   ├── _metadata +│   └── p=1 +│   └── q=1 +│   └── delta_0000015_0000015_0000 +│   └── bucket_00000 +└── warehouse + ├── acidtbl + ├── acidtblpart + ├── nonacidnonbucket + ├── nonacidorctbl + ├── nonacidorctbl2 + └── t + ├── p=1 + │   └── q=1 + │   └── delta_0000015_0000015_0000 + │   └── bucket_00000 + └── p=2 + └── q=2 + └── delta_0000015_0000015_0000 + └── bucket_00000 +*/ + } + @Test + public void testCtasPartitioned() throws Exception { + runStatementOnDriver("insert into " + Table.NONACIDNONBUCKET + "(a,b) values(1,2),(1,3)"); + runStatementOnDriver("create table 
myctas partitioned by (b int) stored as " + + "ORC TBLPROPERTIES ('transactional'='true') as select a, b from " + Table.NONACIDORCTBL); + int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode();//this code doesn't propagate + } +} diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index ddbbd1a1ca..a4e88b851b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -181,6 +181,15 @@ public void testCTAS() throws Exception { runStatementOnDriver("drop table if exists myctas"); int[][] values = {{1,2},{3,4}}; runStatementOnDriver("insert into " + Table.NONACIDORCTBL + makeValuesClause(values)); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false); + if(!true) { + List rs1 = runStatementOnDriver("explain create table myctas stored as ORC TBLPROPERTIES ('transactional" + + "'='true', 'transactional_properties'='default') as select a, b from " + Table.NONACIDORCTBL); + LOG.error("explain ctas"); + for (String r : rs1) { + LOG.error(r); + } + } runStatementOnDriver("create table myctas stored as ORC TBLPROPERTIES ('transactional" + "'='true', 'transactional_properties'='default') as select a, b from " + Table.NONACIDORCTBL); List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas order by ROW__ID"); @@ -192,7 +201,7 @@ public void testCTAS() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBL + makeValuesClause(values)); runStatementOnDriver("create table myctas2 stored as ORC TBLPROPERTIES ('transactional" + - "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem makeing table acid in listener is too late + "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem making table acid in listener is too 
late rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID"); String expected2[][] = { {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000018_0000018_0000/bucket_00000"}, @@ -234,7 +243,7 @@ public void testCtasEmpty() throws Exception { /** * Insert into unbucketed acid table from union all query - * Union All is flattend so nested subdirs are created and acid move drops them since + * Union All is flattened so nested subdirs are created and acid move drops them since * delta dirs have unique names */ @Test