diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index dc31505a44..9fd0a70cb3 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1307,7 +1307,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true, "Use schema evolution to convert self-describing file format's data to the schema desired by the reader."), - HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false, + HIVE_ACID_TABLE_SCAN("hive.acid.table.scan", false, "internal usage only -- do transaction (ACID) table scan.", true), HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000, diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java index 040906f34d..02ccd48533 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java @@ -131,10 +131,12 @@ public void configureInputJobProperties(TableDesc tableDesc, jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesSb.toString()); jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, typeNamesSb.toString()); - boolean isAcidTable = AcidUtils.isTablePropertyTransactional(tableProperties); - AcidUtils.setTransactionalTableScan(jobProperties, isAcidTable); + boolean isTransactionalTable = AcidUtils.isTablePropertyTransactional(tableProperties); AcidUtils.AcidOperationalProperties acidOperationalProperties = AcidUtils.getAcidOperationalProperties(tableProperties); + if(acidOperationalProperties.isSplitUpdate()) { + AcidUtils.setAcidTableScan(jobProperties, isTransactionalTable); + } AcidUtils.setAcidOperationalProperties(jobProperties, acidOperationalProperties); } } catch (IOException e) { diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java index 996bb02dc7..4f74349198 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java @@ -113,9 +113,9 @@ public static void setOutput(Configuration conf, Credentials credentials, if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) { throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported"); } - - if (AcidUtils.isAcidTable(table)) { - throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into an insert-only ACID table from Pig/Mapreduce is not supported"); + if (AcidUtils.isTransactionalTable(table)) { + throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a transactional table " + + table.getFullyQualifiedName() + " from Pig/Mapreduce is not supported"); } // Set up a common id hash for this job, so that when we create any temporary directory diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index 49aad392d8..dc8eee1aac 100644 --- 
hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -564,7 +564,7 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int job.set(BUCKET_COUNT, Integer.toString(buckets)); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg"); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); InputSplit[] splits = inf.getSplits(job, buckets); diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java index d5429fbbd6..25db0fb4af 100644 --- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java +++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java @@ -143,7 +143,7 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) { job.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(table.getSd().getNumBuckets())); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg"); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); InputSplit[] splits = inputFormat.getSplits(job, 1); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index a1cd9ebeef..75eeaf61d6 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -1352,7 +1352,7 @@ public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) { conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty); conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets)); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); AcidInputFormat.RawReader reader = aif.getRawReader(conf, true, bucket, txnList, base, deltas); RecordIdentifier identifier = reader.createKey(); diff --git llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java index 5f010bed8d..a4b877b0be 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java @@ -139,7 +139,7 @@ private LlapRecordReader(MapWork mapWork, JobConf job, FileSplit split, this.counters = new QueryFragmentCounters(job, taskCounters); this.counters.setDesc(QueryFragmentCounters.Desc.MACHINE, hostName); - isAcidScan = HiveConf.getBoolVar(jobConf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + isAcidScan = HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN); TypeDescription schema = 
OrcInputFormat.getDesiredRowTypeDescr( job, isAcidScan, Integer.MAX_VALUE); if (isAcidScan) { diff --git llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java index a1ff360fd0..21f90a7853 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java @@ -228,7 +228,7 @@ public OrcEncodedDataReader(LowLevelCache lowLevelCache, BufferUsageManager buff readerSchema = fileMetadata.getSchema(); } readerIncludes = OrcInputFormat.genIncludedColumns(readerSchema, includedColumnIds); - if (HiveConf.getBoolVar(jobConf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN)) { + if (HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN)) { fileIncludes = OrcInputFormat.shiftReaderIncludedForAcid(readerIncludes); } else { fileIncludes = OrcInputFormat.genIncludedColumns(fileSchema, includedColumnIds); diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java index 0a25707dd9..be6f5d9415 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java @@ -146,7 +146,7 @@ public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, S this.operation = operation; this.autoCommitValue = sem.getAutoCommitValue(); this.resultSchema = resultSchema; - this.acidResourcesInQuery = sem.hasAcidInQuery(); + this.acidResourcesInQuery = sem.hasTransactionalInQuery(); this.acidSinks = sem.getAcidFileSinks(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 55ef8de9a5..a39b403867 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -2163,7 +2163,7 @@ private void checkArchiveProperty(int partSpecLevel, private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { Table tbl = db.getTable(desc.getTableName()); - if (!AcidUtils.isFullAcidTable(tbl) && !AcidUtils.isInsertOnlyTable(tbl.getParameters())) { + if (!AcidUtils.isAcidTable(tbl) && !AcidUtils.isInsertOnlyTable(tbl.getParameters())) { throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(), tbl.getTableName()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java index 6589bb2091..fc6052b04e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java @@ -24,7 +24,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.DriverContext; @@ -42,7 +41,6 @@ import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.StringUtils; /** * FetchTask implementation. 
@@ -81,7 +79,7 @@ public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext // push down filters HiveInputFormat.pushFilters(job, ts); - AcidUtils.setTransactionalTableScan(job, ts.getConf().isAcidTable()); + AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties()); } sink = work.getSink(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java index 64aa744206..6df1e32c53 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java @@ -209,7 +209,7 @@ public void initializeMapredLocalWork(MapJoinDesc mjConf, Configuration hconf, // push down filters HiveInputFormat.pushFilters(jobClone, ts); - AcidUtils.setTransactionalTableScan(jobClone, ts.getConf().isAcidTable()); + AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties()); ts.passExecContext(getExecContext()); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index b6a988dc59..60d2b7b744 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -485,7 +485,7 @@ private void initializeOperators(Map fetchOpJobConfMap) // push down filters HiveInputFormat.pushFilters(jobClone, ts); - AcidUtils.setTransactionalTableScan(jobClone, ts.getConf().isAcidTable()); + AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties()); // create a fetch operator diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index a85713b350..cc37dd0fe2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -353,7 +353,7 @@ else if (filename.startsWith(BUCKET_PREFIX)) { } return result; } - + //This is used for (full) Acid tables. 
InsertOnly use NOT_ACID public enum Operation implements Serializable { NOT_ACID, INSERT, UPDATE, DELETE; } @@ -1204,16 +1204,18 @@ public static boolean isTablePropertyTransactional(Configuration conf) { } return resultStr != null && resultStr.equalsIgnoreCase("true"); } - - public static void setTransactionalTableScan(Map parameters, boolean isAcidTable) { - parameters.put(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, Boolean.toString(isAcidTable)); + /** + * Means it's a full acid table + */ + public static void setAcidTableScan(Map parameters, boolean isAcidTable) { + parameters.put(ConfVars.HIVE_ACID_TABLE_SCAN.varname, Boolean.toString(isAcidTable)); } /** * Means it's a full acid table */ - public static void setTransactionalTableScan(Configuration conf, boolean isFullAcidTable) { - HiveConf.setBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, isFullAcidTable); + public static void setAcidTableScan(Configuration conf, boolean isFullAcidTable) { + HiveConf.setBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN, isFullAcidTable); } /** * @param p - not null @@ -1221,15 +1223,12 @@ public static void setTransactionalTableScan(Configuration conf, boolean isFullA public static boolean isDeleteDelta(Path p) { return p.getName().startsWith(DELETE_DELTA_PREFIX); } - /** Checks if a table is a valid ACID table. - * Note, users are responsible for using the correct TxnManager. We do not look at - * SessionState.get().getTxnMgr().supportsAcid() here - * @param table table - * @return true if table is a legit ACID table, false otherwise - * ToDo: this shoudl be renamed isTransactionalTable() since that is what it's checking and covers - * both Acid and MM tables. HIVE-18124 + + /** + * Should produce the same result as + * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table)} */ - public static boolean isAcidTable(Table table) { + public static boolean isTransactionalTable(Table table) { if (table == null) { return false; } @@ -1240,11 +1239,7 @@ public static boolean isAcidTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } - /** - * ToDo: this shoudl be renamed isTransactionalTable() since that is what it's checking and convers - * both Acid and MM tables. 
HIVE-18124 - */ - public static boolean isAcidTable(CreateTableDesc table) { + public static boolean isTransactionalTable(CreateTableDesc table) { if (table == null || table.getTblProps() == null) { return false; } @@ -1256,13 +1251,14 @@ public static boolean isAcidTable(CreateTableDesc table) { } /** - * after isTransactionalTable() then make this isAcid() HIVE-18124 + * Should produce the same result as + * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)} */ - public static boolean isFullAcidTable(Table table) { - return isAcidTable(table) && !AcidUtils.isInsertOnlyTable(table); + public static boolean isAcidTable(Table table) { + return isTransactionalTable(table) && !AcidUtils.isInsertOnlyTable(table); } - public static boolean isFullAcidTable(CreateTableDesc td) { + public static boolean isAcidTable(CreateTableDesc td) { if (td == null || td.getTblProps() == null) { return false; } @@ -1392,7 +1388,7 @@ public static long getLogicalLength(FileSystem fs, FileStatus file) throws IOExc /** - * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE + * Checks if a table is a transactional table that only supports INSERT, but not UPDATE/DELETE * @param params table properties * @return true if table is an INSERT_ONLY table, false otherwise */ @@ -1400,7 +1396,7 @@ public static boolean isInsertOnlyTable(Map params) { return isInsertOnlyTable(params, false); } public static boolean isInsertOnlyTable(Table table) { - return isAcidTable(table) && getAcidOperationalProperties(table).isInsertOnly(); + return isTransactionalTable(table) && getAcidOperationalProperties(table).isInsertOnly(); } // TODO [MM gap]: CTAS may currently be broken. It used to work. See the old code, and why isCtas isn't used? 
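For orientation, the renamed AcidUtils predicates in the hunk above can be summarized with a small sketch. This is not part of the patch: the class and method below are hypothetical, only the AcidUtils calls and the hive.acid.table.scan key come from the change itself, and the imports assume the usual Hadoop Configuration and ql.metadata.Table types.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Table;
// Hypothetical helper (not part of this patch) illustrating the renamed predicates.
public class AcidScanConfigSketch {
  static void configureScan(Configuration conf, Table table) {
    boolean transactional = AcidUtils.isTransactionalTable(table); // any table with transactional=true
    boolean fullAcid = AcidUtils.isAcidTable(table);               // transactional and not insert-only
    boolean insertOnly = AcidUtils.isInsertOnlyTable(table);       // transactional "MM" table
    // Per the patch, only a full-ACID read sets the hive.acid.table.scan flag;
    // insert-only (MM) and plain tables leave it unset.
    AcidUtils.setAcidTableScan(conf, fullAcid);
  }
}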
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index b35df69885..0718995ebe 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -468,7 +468,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job try { Utilities.copyTablePropertiesToConf(table, conf); if(tableScan != null) { - AcidUtils.setTransactionalTableScan(conf, tableScan.getConf().isAcidTable()); + AcidUtils.setAcidTableScan(conf, tableScan.getConf().isAcidTable()); } } catch (HiveException e) { throw new IOException(e); @@ -851,7 +851,7 @@ protected void pushProjectionsAndFilters(JobConf jobConf, Class inputFormatClass // push down filters pushFilters(jobConf, ts); - AcidUtils.setTransactionalTableScan(job, ts.getConf().isAcidTable()); + AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties()); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index becdc71d89..09737fbb1b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -214,7 +214,7 @@ public boolean isAcidRead(Configuration conf, InputSplit inputSplit) { /* * Fallback for the case when OrcSplit flags do not contain hasBase and deltas */ - return HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + return HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN); } private static class OrcRecordReader @@ -309,7 +309,7 @@ public static RecordReader createReaderFromFile(Reader file, long offset, long length ) throws IOException { - boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN); if (isTransactionalTableScan) { raiseAcidTablesMustBeReadWithAcidReaderException(conf); } @@ -1692,7 +1692,7 @@ private long computeProjectionSize(List fileTypes, } boolean isTransactionalTableScan = - HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN); boolean isSchemaEvolution = HiveConf.getBoolVar(conf, ConfVars.HIVE_SCHEMA_EVOLUTION); TypeDescription readerSchema = OrcInputFormat.getDesiredRowTypeDescr(conf, isTransactionalTableScan, Integer.MAX_VALUE); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java index edffa5b6e5..fdb3808d1d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java @@ -234,7 +234,7 @@ public long getColumnarProjectionSize() { public boolean canUseLlapIo(Configuration conf) { final boolean hasDelta = deltas != null && !deltas.isEmpty(); final boolean isAcidRead = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN); final boolean isVectorized = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED); final AcidUtils.AcidOperationalProperties acidOperationalProperties diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java 
ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java index d571bd0b48..e3e3b64dc5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java @@ -158,7 +158,7 @@ public VectorizedOrcAcidRowBatchReader(OrcSplit inputSplit, JobConf conf, Report private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporter reporter, VectorizedRowBatchCtx rowBatchCtx) throws IOException { this.rbCtx = rowBatchCtx; - final boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + final boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN); final AcidUtils.AcidOperationalProperties acidOperationalProperties = AcidUtils.getAcidOperationalProperties(conf); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java index 736034d3b2..9e481ae266 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java @@ -64,7 +64,7 @@ VectorizedOrcRecordReader(Reader file, Configuration conf, FileSplit fileSplit) throws IOException { - boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN); + boolean isAcidRead = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN); if (isAcidRead) { OrcInputFormat.raiseAcidTablesMustBeReadWithAcidReaderException(conf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index fdb3603338..97f51381eb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -26,7 +26,6 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; -import org.apache.hadoop.hive.ql.plan.api.Query; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.slf4j.Logger; @@ -314,7 +313,7 @@ private void verifyState(QueryPlan queryPlan) throws LockException { issue ROLLBACK but these tables won't rollback. Can do this by checking ReadEntity/WriteEntity to determine whether it's reading/writing any non acid and raise an appropriate error - * Driver.acidSinks and Driver.acidInQuery can be used if any acid is in the query*/ + * Driver.acidSinks and Driver.transactionalInQuery can be used if any acid is in the query*/ } /** @@ -326,7 +325,7 @@ private boolean allowOperationInATransaction(QueryPlan queryPlan) { //in a txn assuming we can determine the target is a suitable table type. 
if(queryPlan.getOperation() == HiveOperation.LOAD && queryPlan.getOutputs() != null && queryPlan.getOutputs().size() == 1) { WriteEntity writeEntity = queryPlan.getOutputs().iterator().next(); - if(AcidUtils.isFullAcidTable(writeEntity.getTable()) || AcidUtils.isInsertOnlyTable(writeEntity.getTable())) { + if(AcidUtils.isAcidTable(writeEntity.getTable()) || AcidUtils.isInsertOnlyTable(writeEntity.getTable())) { switch (writeEntity.getWriteType()) { case INSERT: //allow operation in a txn @@ -406,7 +405,7 @@ LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isB continue; } if(t != null) { - compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t)); + compBuilder.setIsAcid(AcidUtils.isAcidTable(t)); } LockComponent comp = compBuilder.build(); LOG.debug("Adding lock component to lock request " + comp.toString()); @@ -460,7 +459,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi break; case INSERT_OVERWRITE: t = getTable(output); - if (AcidUtils.isAcidTable(t)) { + if (AcidUtils.isTransactionalTable(t)) { compBuilder.setSemiShared(); compBuilder.setOperationType(DataOperationType.UPDATE); } else { @@ -470,7 +469,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi break; case INSERT: assert t != null; - if(AcidUtils.isFullAcidTable(t)) { + if(AcidUtils.isAcidTable(t)) { compBuilder.setShared(); } else { @@ -504,7 +503,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi output.getWriteType().toString()); } if(t != null) { - compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t)); + compBuilder.setIsAcid(AcidUtils.isAcidTable(t)); } compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite()); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 50bdce89a4..7e059da2f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1720,7 +1720,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par Path tblDataLocationPath = tbl.getDataLocation(); boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters()); assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName(); - boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl); + boolean isFullAcidTable = AcidUtils.isAcidTable(tbl); try { // Get the partition object if it already exists Partition oldPart = getPartition(tbl, partSpec, false); @@ -2317,7 +2317,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType Table tbl = getTable(tableName); assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName(); boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl); - boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl); + boolean isFullAcidTable = AcidUtils.isAcidTable(tbl); HiveConf sessionConf = SessionState.getSessionConf(); if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) { newFiles = Collections.synchronizedList(new ArrayList()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java index 31d2b2342b..7f5e543c1c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java @@ -410,7 +410,7 @@ public 
Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (stack.get(0) instanceof TableScanOperator) { TableScanOperator tso = ((TableScanOperator)stack.get(0)); Table tab = tso.getConf().getTableMetadata(); - if (AcidUtils.isFullAcidTable(tab)) { + if (AcidUtils.isAcidTable(tab)) { /*ACID tables have complex directory layout and require merging of delta files * on read thus we should not try to read bucket files directly*/ return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index 85f198b6cd..0f3a8d1561 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -19,13 +19,10 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.Stack; @@ -277,7 +274,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer."); return null; } - if (AcidUtils.isAcidTable(tbl)) { + if (AcidUtils.isTransactionalTable(tbl)) { + //todo: should this be OK for MM table? Logger.info("Table " + tbl.getTableName() + " is ACID table. Skip StatsOptimizer."); return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index d36d24d090..2c3af5d130 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -125,7 +125,7 @@ // whether any ACID table or Insert-only (mm) table is involved in a query // They both require DbTxnManager and both need to recordValidTxns when acquiring locks in Driver - protected boolean acidInQuery; + protected boolean transactionalInQuery; protected HiveTxnManager txnManager; @@ -1487,8 +1487,8 @@ public QueryProperties getQueryProperties() { return acidFileSinks; } - public boolean hasAcidInQuery() { - return acidInQuery; + public boolean hasTransactionalInQuery() { + return transactionalInQuery; } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index a09b7961c2..8b53cb7ed6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1903,9 +1903,9 @@ private WriteType determineAlterTableWriteType(Table tab, AlterTableDesc desc, A if(desc != null && desc.getProps() != null && Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) { convertingToAcid = true; } - if(!AcidUtils.isAcidTable(tab) && convertingToAcid) { - //non to acid conversion (property itself) must be mutexed to prevent concurrent writes. - // See HIVE-16688 for use case. + if(!AcidUtils.isTransactionalTable(tab) && convertingToAcid) { + //non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes. + // See HIVE-16688 for use cases. 
return WriteType.DDL_EXCLUSIVE; } return WriteEntity.determineAlterTableWriteType(op); @@ -2125,7 +2125,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, } // transactional tables are compacted and no longer needs to be bucketed, so not safe for merge/concatenation - boolean isAcid = AcidUtils.isAcidTable(tblObj); + boolean isAcid = AcidUtils.isTransactionalTable(tblObj); if (isAcid) { throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_TRANSACTIONAL.getMsg()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index cc956da575..26b4fea2b7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -159,12 +159,12 @@ private URI initializeFromURI(String fromPath, boolean isLocal) throws IOExcepti throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast, "source contains directory: " + oneSrc.getPath().toString())); } - if(AcidUtils.isFullAcidTable(table)) { + if(AcidUtils.isAcidTable(table)) { if(!AcidUtils.originalBucketFilter.accept(oneSrc.getPath())) { //acid files (e.g. bucket_0000) have ROW_ID embedded in them and so can't be simply //copied to a table so only allow non-acid files for now throw new SemanticException(ErrorMsg.ACID_LOAD_DATA_INVALID_FILE_NAME, - oneSrc.getPath().getName(), table.getDbName() + "." + table.getTableName()); + oneSrc.getPath().getName(), table.getFullyQualifiedName()); } } } @@ -283,7 +283,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Long txnId = null; int stmtId = -1; - if (AcidUtils.isAcidTable(ts.tableHandle)) { + if (AcidUtils.isTransactionalTable(ts.tableHandle)) { txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); stmtId = SessionState.get().getTxnMgr().getWriteIdAndIncrement(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 28e3621d32..42073e454b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6716,7 +6716,7 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z'); } input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(), - maxReducers, (AcidUtils.isFullAcidTable(dest_tab) ? + maxReducers, (AcidUtils.isAcidTable(dest_tab) ? 
getAcidType(table_desc.getOutputFileFormatClass(), dest) : AcidUtils.Operation.NOT_ACID)); reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0)); ctx.setMultiFileSpray(multiFileSpray); @@ -6781,8 +6781,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) Integer dest_type = qbm.getDestTypeForAlias(dest); Table dest_tab = null; // destination table if any - boolean destTableIsAcid = false; // true for full ACID table and MM table - boolean destTableIsFullAcid = false; // should the destination table be written to using ACID + boolean destTableIsTransactional; // true for full ACID table and MM table + boolean destTableIsFullAcid; // should the destination table be written to using ACID boolean destTableIsTemporary = false; boolean destTableIsMaterialization = false; Partition dest_part = null;// destination partition if any @@ -6803,8 +6803,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) case QBMetaData.DEST_TABLE: { dest_tab = qbm.getDestTableForAlias(dest); - destTableIsAcid = AcidUtils.isAcidTable(dest_tab); - destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); + destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab); + destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab); destTableIsTemporary = dest_tab.isTemporary(); // Is the user trying to insert into a external tables @@ -6874,11 +6874,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsFullAcid) { acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); + //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM checkAcidConstraints(qb, table_desc, dest_tab); } - if (AcidUtils.isInsertOnlyTable(table_desc.getProperties())) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); - } if (isMmTable) { txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); } else { @@ -6891,7 +6889,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old // deltas and base and leave them up to the cleaner to clean up LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), - dest_tab.getTableName()) && !destTableIsAcid) + dest_tab.getTableName()) && !destTableIsTransactional) ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING; ltd.setLoadFileType(loadType); ltd.setLbCtx(lbCtx); @@ -6915,8 +6913,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part = qbm.getDestPartitionForAlias(dest); dest_tab = dest_part.getTable(); - destTableIsAcid = AcidUtils.isAcidTable(dest_tab); - destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); + destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab); + destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab); checkExternalTable(dest_tab); @@ -6951,11 +6949,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsFullAcid) { acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); + //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM? 
checkAcidConstraints(qb, table_desc, dest_tab); } - if (AcidUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); - } if (isMmTable) { txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); } else { @@ -6966,7 +6962,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old // deltas and base and leave them up to the cleaner to clean up LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), - dest_tab.getTableName()) && !destTableIsAcid) // // Both Full-acid and MM tables are excluded. + dest_tab.getTableName()) && !destTableIsTransactional) // // Both Full-acid and MM tables are excluded. ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING; ltd.setLoadFileType(loadType); ltd.setLbCtx(lbCtx); @@ -7039,8 +7035,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) viewDesc.setSchema(new ArrayList(field_schemas)); } - destTableIsAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc); - destTableIsFullAcid = tblDesc != null && AcidUtils.isFullAcidTable(tblDesc); + destTableIsTransactional = tblDesc != null && AcidUtils.isTransactionalTable(tblDesc); + destTableIsFullAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc); boolean isDestTempFile = true; if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) { @@ -7053,7 +7049,10 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE); // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats. loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols, - colTypes, destTableIsAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCtas)); + colTypes, + destTableIsFullAcid ? //note: this is a change - the previous version used 'transactional' here, the one before that used 'acid' + Operation.INSERT : Operation.NOT_ACID, + isMmCtas)); if (tblDesc == null) { if (viewDesc != null) { table_desc = PlanUtils.getTableDesc(viewDesc, cols, colTypes); @@ -7140,7 +7139,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part, - dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, + dest_path, currentTableId, destTableIsFullAcid, destTableIsTemporary,//this argument was previously destTableIsAcid destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, dest_tab, txnId, isMmCtas, dest_type, qb); if (isMmCtas) { @@ -7495,11 +7494,6 @@ String fixCtasColumnName(String colName) { return colName; } - // Check constraints on acid tables. This includes - // * Check that the table is bucketed - // * Check that the table is not sorted - // This method assumes you have already decided that this is an Acid write. Don't call it if - // that isn't true. private void checkAcidConstraints(QB qb, TableDesc tableDesc, Table table) throws SemanticException { /* @@ -7512,10 +7506,6 @@ These props are now enabled elsewhere (see commit diffs). It would be better in backwards incompatible.
*/ conf.set(AcidUtils.CONF_ACID_KEY, "true"); - - if (table.getSortCols() != null && table.getSortCols().size() > 0) { - throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); - } } /** @@ -12123,8 +12113,8 @@ public void validate() throws SemanticException { if (p != null) { tbl = p.getTable(); } - if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || AcidUtils.isInsertOnlyTable(tbl.getParameters()))) { - acidInQuery = true; + if (tbl != null && AcidUtils.isTransactionalTable(tbl)) { + transactionalInQuery = true; checkAcidTxnManager(tbl); } } @@ -12186,8 +12176,8 @@ public void validate() throws SemanticException { tbl = writeEntity.getTable(); } - if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || AcidUtils.isInsertOnlyTable(tbl.getParameters()))) { - acidInQuery = true; + if (tbl != null && AcidUtils.isTransactionalTable(tbl)) { + transactionalInQuery = true; checkAcidTxnManager(tbl); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java index 7ecd1ffa5e..075aac506f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java @@ -175,7 +175,7 @@ private void checkValidSetClauseTarget(ASTNode colName, Table targetTable) throw } if(!foundColumnInTargetTable) { throw new SemanticException(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE, colName.getText(), - getDotName(new String[] {targetTable.getDbName(), targetTable.getTableName()})); + targetTable.getFullyQualifiedName()); } } private ASTNode findLHSofAssignment(ASTNode assignment) { @@ -318,7 +318,7 @@ private ReparseResult parseRewrittenQuery(StringBuilder rewrittenQueryStr, Strin private void validateTargetTable(Table mTable) throws SemanticException { if (mTable.getTableType() == TableType.VIRTUAL_VIEW || mTable.getTableType() == TableType.MATERIALIZED_VIEW) { - LOG.error("Table " + getDotName(new String[] {mTable.getDbName(), mTable.getTableName()}) + " is a view or materialized view"); + LOG.error("Table " + mTable.getFullyQualifiedName() + " is a view or materialized view"); throw new SemanticException(ErrorMsg.UPDATE_DELETE_VIEW.getMsg()); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java index 9477df68ee..601713703e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java @@ -35,10 +35,10 @@ * Need to remember whether this is an acid compliant operation, and if so whether it is an * insert, update, or delete. 
*/ - final AcidUtils.Operation writeType; + private final AcidUtils.Operation writeType; - public LoadDesc(final Path sourcePath, AcidUtils.Operation writeType) { + LoadDesc(final Path sourcePath, AcidUtils.Operation writeType) { this.sourcePath = sourcePath; this.writeType = writeType; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 4b7d2b45b4..aa96072f36 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -135,7 +135,7 @@ public TableScanDesc(final String alias, List vcs, Table tblMetad this.alias = alias; this.virtualCols = vcs; this.tableMetadata = tblMetadata; - isAcidTable = AcidUtils.isFullAcidTable(this.tableMetadata); + isAcidTable = AcidUtils.isAcidTable(this.tableMetadata); if (isAcidTable) { acidOperationalProperties = AcidUtils.getAcidOperationalProperties(this.tableMetadata); } diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java index e8d3184f40..6bba6b06a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java @@ -48,7 +48,7 @@ public static Partish buildFor(Table table, Partition part) { // rename @Deprecated public final boolean isAcid() { - return AcidUtils.isFullAcidTable(getTable()); + return AcidUtils.isAcidTable(getTable()); } public abstract Table getTable(); diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index a804527527..b9077d1acf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -364,7 +364,7 @@ private void setColumnTypes(JobConf job, List cols) { } job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, colNames.toString()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, colTypes.toString()); - HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true); + HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); } diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java index b98c74a889..073cc1afdb 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java @@ -260,7 +260,12 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti String testQuery = isVectorized ? 
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; - +/* +{"transactionid":0,"bucketid":536870912,"rowid":0} 0 2/000000_0 +{"transactionid":0,"bucketid":536870912,"rowid":1} 0 4/000000_0 +{"transactionid":24,"bucketid":536870912,"rowid":0} 4 4/delta_0000024_0000024_0000/000000_0 +{"transactionid":24,"bucketid":536870912,"rowid":1} 5 5/delta_0000024_0000024_0000/000000_0 +*/ String[][] expected = new String[][] { //from pre-acid insert {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/000000_0"}, diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 5d2652457b..cec1d34535 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -836,7 +836,7 @@ public void testBIStrategySplitBlockBoundary() throws Exception { public void testEtlCombinedStrategy() throws Exception { conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL"); conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS.varname, "1000000"); - AcidUtils.setTransactionalTableScan(conf, true); + AcidUtils.setAcidTableScan(conf, true); conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default"); @@ -2285,7 +2285,7 @@ public void testVectorizationWithAcid() throws Exception { conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty()); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty()); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); org.apache.hadoop.mapred.RecordReader reader = inputFormat.getRecordReader(splits[0], conf, Reporter.NULL); @@ -3377,7 +3377,7 @@ public void testVectorReaderFooterSerialize() throws Exception { public void testACIDReaderNoFooterSerialize() throws Exception { MockFileSystem fs = new MockFileSystem(conf); MockPath mockPath = new MockPath(fs, "mock:///mocktable5"); - conf.set("hive.transactional.table.scan", "true"); + conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true"); conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); @@ -3458,7 +3458,7 @@ public void testACIDReaderNoFooterSerialize() throws Exception { public void testACIDReaderFooterSerialize() throws Exception { MockFileSystem fs = new MockFileSystem(conf); MockPath mockPath = new MockPath(fs, "mock:///mocktable6"); - conf.set("hive.transactional.table.scan", "true"); + conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true"); conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); @@ -3569,7 +3569,7 @@ public void testACIDReaderNoFooterSerializeWithDeltas() throws Exception { //set up props for read conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - AcidUtils.setTransactionalTableScan(conf, true); + AcidUtils.setAcidTableScan(conf, true); OrcInputFormat orcInputFormat = new OrcInputFormat(); InputSplit[] splits = 
orcInputFormat.getSplits(conf, 2); @@ -3648,7 +3648,7 @@ public void testACIDReaderFooterSerializeWithDeltas() throws Exception { //set up props for read conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - AcidUtils.setTransactionalTableScan(conf, true); + AcidUtils.setAcidTableScan(conf, true); OrcInputFormat orcInputFormat = new OrcInputFormat(); InputSplit[] splits = orcInputFormat.getSplits(conf, 2); diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java index 030f012a21..56148d0168 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java @@ -20,8 +20,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.io.BucketCodec; import org.apache.orc.CompressionKind; @@ -67,10 +65,7 @@ import com.google.common.collect.Lists; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -389,7 +384,7 @@ public void testNewBase() throws Exception { Configuration conf = new Configuration(); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "col1"); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "string"); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); Reader reader = Mockito.mock(Reader.class, settings); RecordReader recordReader = Mockito.mock(RecordReader.class, settings); @@ -607,7 +602,7 @@ public void testEmpty() throws Exception { OrcFile.readerOptions(conf)); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); OrcRawRecordMerger merger = new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET, createMaximalTxnList(), new Reader.Options(), @@ -686,7 +681,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); - AcidUtils.setTransactionalTableScan(conf,true); + AcidUtils.setAcidTableScan(conf,true); conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); //the first "split" is for base/ @@ -1154,7 +1149,7 @@ public synchronized void addedRow(int rows) throws IOException { JobConf job = new JobConf(); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty()); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); job.set("mapred.min.split.size", "1"); job.set("mapred.max.split.size", "2"); @@ -1289,7 +1284,7 @@ public synchronized void addedRow(int rows) throws IOException { 
job.set("mapred.input.dir", root.toString()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty()); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); InputSplit[] splits = inf.getSplits(job, 5); //base has 10 rows, so 5 splits, 1 delta has 2 rows so 1 split, and 1 delta has 3 so 2 splits @@ -1386,7 +1381,7 @@ public void testRecordReaderDelta() throws Exception { job.set("bucket_count", "1"); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); InputSplit[] splits = inf.getSplits(job, 5); assertEquals(2, splits.length); @@ -1460,7 +1455,7 @@ private void testRecordReaderIncompleteDelta(boolean use130Format) throws Except job.set("bucket_count", "2"); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty()); - AcidUtils.setTransactionalTableScan(job,true); + AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); // read the keys before the delta is flushed diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java index 95e34632b9..3eb33da25e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java @@ -92,7 +92,7 @@ public void setup() throws Exception { conf = new JobConf(); conf.set("bucket_count", "1"); conf.set(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); - conf.setBoolean(HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, true); + conf.setBoolean(HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN.varname, true); conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default"); conf.setInt(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname, AcidUtils.AcidOperationalProperties.getDefault().toInt()); diff --git ql/src/test/queries/clientnegative/insert_sorted.q ql/src/test/queries/clientnegative/create_acid_sorted.q similarity index 51% rename from ql/src/test/queries/clientnegative/insert_sorted.q rename to ql/src/test/queries/clientnegative/create_acid_sorted.q index cd1a69cc3f..56bcb6257d 100644 --- ql/src/test/queries/clientnegative/insert_sorted.q +++ ql/src/test/queries/clientnegative/create_acid_sorted.q @@ -1,7 +1,7 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only"); +insert into mm_insertsort values (1, '1'),(2, '2'); create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); - -insert into table acid_insertsort select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; diff --git 
ql/src/test/queries/clientnegative/delete_sorted.q ql/src/test/queries/clientnegative/delete_sorted.q deleted file mode 100644 index 9f82c1f80e..0000000000 --- ql/src/test/queries/clientnegative/delete_sorted.q +++ /dev/null @@ -1,7 +0,0 @@ -set hive.support.concurrency=true; -set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; - - -create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); - -delete from acid_insertsort where a = 3; diff --git ql/src/test/queries/clientnegative/insert_values_sorted.q ql/src/test/queries/clientnegative/insert_values_sorted.q deleted file mode 100644 index ee26402a2c..0000000000 --- ql/src/test/queries/clientnegative/insert_values_sorted.q +++ /dev/null @@ -1,7 +0,0 @@ -set hive.support.concurrency=true; -set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; - - -create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); - -insert into table acid_insertsort values (1, 'abc'),(2, 'def'); diff --git ql/src/test/queries/clientnegative/update_sorted.q ql/src/test/queries/clientnegative/update_sorted.q deleted file mode 100644 index f9e5db52f5..0000000000 --- ql/src/test/queries/clientnegative/update_sorted.q +++ /dev/null @@ -1,7 +0,0 @@ -set hive.support.concurrency=true; -set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; - - -create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); - -update acid_insertsort set b = 'fred' where b = 'bob'; diff --git ql/src/test/results/clientnegative/create_acid_sorted.q.out ql/src/test/results/clientnegative/create_acid_sorted.q.out new file mode 100644 index 0000000000..0b1d253855 --- /dev/null +++ ql/src/test/results/clientnegative/create_acid_sorted.q.out @@ -0,0 +1,21 @@ +PREHOOK: query: create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mm_insertsort +POSTHOOK: query: create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mm_insertsort +PREHOOK: query: insert into mm_insertsort values (1, '1'),(2, '2') +PREHOOK: type: QUERY +PREHOOK: Output: default@mm_insertsort +POSTHOOK: query: insert into mm_insertsort values (1, '1'),(2, '2') +POSTHOOK: type: QUERY +POSTHOOK: Output: default@mm_insertsort +POSTHOOK: Lineage: mm_insertsort.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: mm_insertsort.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_insertsort +FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.) diff --git ql/src/test/results/clientnegative/delete_sorted.q.out ql/src/test/results/clientnegative/delete_sorted.q.out deleted file mode 100644 index 0d248d0175..0000000000 --- ql/src/test/results/clientnegative/delete_sorted.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_insertsort -POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_insertsort -FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git ql/src/test/results/clientnegative/insert_sorted.q.out ql/src/test/results/clientnegative/insert_sorted.q.out deleted file mode 100644 index 50dd5ebbe1..0000000000 --- ql/src/test/results/clientnegative/insert_sorted.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_insertsort -POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_insertsort -FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git ql/src/test/results/clientnegative/insert_values_sorted.q.out ql/src/test/results/clientnegative/insert_values_sorted.q.out deleted file mode 100644 index 50dd5ebbe1..0000000000 --- ql/src/test/results/clientnegative/insert_values_sorted.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_insertsort -POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_insertsort -FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git ql/src/test/results/clientnegative/update_sorted.q.out ql/src/test/results/clientnegative/update_sorted.q.out deleted file mode 100644 index 50dd5ebbe1..0000000000 --- ql/src/test/results/clientnegative/update_sorted.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: 
database:default -PREHOOK: Output: default@acid_insertsort -POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_insertsort -FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git ql/src/test/results/clientpositive/llap/mm_all.q.out ql/src/test/results/clientpositive/llap/mm_all.q.out index 0374ea6302..767ab47eec 100644 --- ql/src/test/results/clientpositive/llap/mm_all.q.out +++ ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -138,7 +138,6 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_mm - Write Type: INSERT micromanaged table: true Stage: Stage-3 diff --git ql/src/test/results/clientpositive/mm_all.q.out ql/src/test/results/clientpositive/mm_all.q.out index 34fd8990eb..66597c0a67 100644 --- ql/src/test/results/clientpositive/mm_all.q.out +++ ql/src/test/results/clientpositive/mm_all.q.out @@ -135,7 +135,6 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_mm - Write Type: INSERT micromanaged table: true Stage: Stage-2 diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java index 1512ffbfe1..f849b1a0c3 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java @@ -54,7 +54,7 @@ public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException { @Override public void onDropTable(DropTableEvent tableEvent) throws MetaException { - if (TxnUtils.isAcidTable(tableEvent.getTable())) { + if (TxnUtils.isTransactionalTable(tableEvent.getTable())) { txnHandler = getTxnHandler(); txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null); } @@ -62,7 +62,7 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException { @Override public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException { - if (TxnUtils.isAcidTable(partitionEvent.getTable())) { + if (TxnUtils.isTransactionalTable(partitionEvent.getTable())) { txnHandler = getTxnHandler(); txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(), partitionEvent.getPartitionIterator()); @@ -76,7 +76,7 @@ private TxnStore getTxnHandler() { boolean origConcurrency = false; // Since TxnUtils.getTxnStore calls TxnHandler.setConf -> checkQFileTestHack -> TxnDbUtil.setConfValues, - // which may change the values of below two entries, we need to avoid pulluting the original values + // which may change the values of below two entries, we need to avoid polluting the original values if (hackOn) { origTxnMgr = MetastoreConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER); origConcurrency = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index da1031300a..c9ee68891e 
100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -142,7 +143,7 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { - throw new MetaException(getTableName(newTable) + + throw new MetaException(Warehouse.getQualifiedName(newTable) + " cannot be declared transactional because it's an external table"); } validateTableStructure(context.getHandler(), newTable); @@ -182,6 +183,17 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw } } } + checkSorted(newTable); + } + private void checkSorted(Table newTable) throws MetaException { + if(!TxnUtils.isAcidTable(newTable)) { + return; + } + StorageDescriptor sd = newTable.getSd(); + if (sd.getSortCols() != null && sd.getSortCols().size() > 0) { + throw new MetaException("Table " + Warehouse.getQualifiedName(newTable) + + " cannot support full ACID functionality since it is sorted."); + } } /** @@ -231,7 +243,7 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { - throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() + + throw new MetaException(Warehouse.getQualifiedName(newTable) + " cannot be declared transactional because it's an external table"); } @@ -241,9 +253,9 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr normazlieTransactionalPropertyDefault(newTable); } initializeTransactionalProperties(newTable); + checkSorted(newTable); return; } - // transactional is found, but the value is not in expected range throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true'"); } @@ -366,18 +378,16 @@ private void validateTableStructure(IHMSHandler hmsHandler, Table table) ); if (!validFile) { throw new IllegalStateException("Unexpected data file name format. Cannot convert " + - getTableName(table) + " to transactional table. File: " + fileStatus.getPath()); + Warehouse.getQualifiedName(table) + " to transactional table. File: " + + fileStatus.getPath()); } } } catch (IOException|NoSuchObjectException e) { - String msg = "Unable to list files for " + getTableName(table); + String msg = "Unable to list files for " + Warehouse.getQualifiedName(table); LOG.error(msg, e); MetaException e1 = new MetaException(msg); e1.initCause(e); throw e1; } } - private static String getTableName(Table table) { - return table.getDbName() + "." 
+ table.getTableName(); - } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index afb4f6b7fb..027fb3f51a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.common.ValidCompactorTxnList; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Table; @@ -129,13 +130,14 @@ public static TxnStore getTxnStore(Configuration conf) { } } - /** Checks if a table is a valid ACID table. + /** * Note, users are responsible for using the correct TxnManager. We do not look at * SessionState.get().getTxnMgr().supportsAcid() here - * @param table table - * @return true if table is a legit ACID table, false otherwise + * Should produce the same result as + * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)} + * @return true if table is a transactional table, false otherwise */ - public static boolean isAcidTable(Table table) { + public static boolean isTransactionalTable(Table table) { if (table == null) { return false; } @@ -145,6 +147,16 @@ public static boolean isAcidTable(Table table) { } /** + * Should produce the same result as + * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)} + */ + public static boolean isAcidTable(Table table) { + return TxnUtils.isTransactionalTable(table) && + TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters() + .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES)); + } + + /** * Build a query (or queries if one query is too big but only for the case of 'IN' * composite clause. For the case of 'NOT IN' clauses, multiple queries change * the semantics of the intended query.
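
A minimal sketch of the distinction this patch codifies in TxnUtils: a table with 'transactional'='true' is a transactional table regardless of its transactional_properties, while only a table whose transactional_properties resolve to "default" counts as full ACID. The sketch below is illustrative only, not part of the patch; the class name TxnUtilsSketch is hypothetical, the constants and helpers are the ones touched above, and the expected results are what the new TxnUtils.isTransactionalTable/isAcidTable methods are intended to return (the "insert_only" and "default" values mirror the renamed create_acid_sorted.q test and TestVectorizedOrcAcidRowBatchReader setup).

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class TxnUtilsSketch {                       // hypothetical demo class, not in the patch
  public static void main(String[] args) {
    // Insert-only (micromanaged) table: transactional, but not full ACID.
    Table insertOnly = new Table();
    Map<String, String> mmParams = new HashMap<>();
    mmParams.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
    mmParams.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
    insertOnly.setParameters(mmParams);

    // Full ACID table: transactional_properties normalized to "default" by the metastore.
    Table fullAcid = new Table();
    Map<String, String> acidParams = new HashMap<>();
    acidParams.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
    acidParams.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
    fullAcid.setParameters(acidParams);

    // Expected per the patch: both are "transactional", only the second is "ACID".
    System.out.println(TxnUtils.isTransactionalTable(insertOnly)); // true
    System.out.println(TxnUtils.isAcidTable(insertOnly));          // false
    System.out.println(TxnUtils.isTransactionalTable(fullAcid));   // true
    System.out.println(TxnUtils.isAcidTable(fullAcid));            // true
  }
}

This is also why the new checkSorted validation in TransactionalValidationListener only rejects sorted tables when TxnUtils.isAcidTable returns true: an insert-only table such as mm_insertsort may keep its SORTED BY clause, while acid_insertsort fails with the "cannot support full ACID functionality since it is sorted" MetaException shown in create_acid_sorted.q.out.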