diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java index 9348efc5a1..e470914ed8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java @@ -32,8 +32,8 @@ private static final long serialVersionUID = 1L; public static final String SCHEMA = - "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid#" + - "string:string:string:string:string:string:string:string:string:string:string"; + "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid,errormessage#" + + "string:string:string:string:string:string:string:string:string:string:string:string"; private String resFile; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java index 517d88237c..d45597ba95 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java @@ -86,6 +86,8 @@ private void writeHeader(DataOutputStream os) throws IOException { os.writeBytes("Duration(ms)"); os.write(Utilities.tabCode); os.writeBytes("HadoopJobId"); + os.write(Utilities.tabCode); + os.writeBytes("Error message"); os.write(Utilities.newLineCode); } @@ -115,6 +117,9 @@ private void writeRow(DataOutputStream os, ShowCompactResponseElement e) throws os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : NO_VAL); os.write(Utilities.tabCode); os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : NO_VAL); + os.write(Utilities.tabCode); + String error = e.getErrorMessage(); + os.writeBytes(error == null ? NO_VAL : error); os.write(Utilities.newLineCode); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 6f64290120..9ba2b24def 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -217,6 +217,7 @@ public Object run() throws Exception { } catch (Exception e) { LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " + StringUtils.stringifyException(e)); + ci.errorMessage = e.getMessage(); txnHandler.markFailed(ci); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index dedc990d0f..37a5862791 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -140,6 +140,7 @@ public void run() { } catch (Throwable t) { LOG.error("Caught exception while trying to determine if we should compact {}. 
" + "Marking failed to avoid repeated failures, {}", ci, t); + ci.errorMessage = t.getMessage(); txnHandler.markFailed(ci); } } @@ -459,12 +460,20 @@ private boolean checkCompactionElig(CompactionInfo ci, ShowCompactResponse curre if (txnHandler.checkFailedCompactions(ci)) { LOG.warn("Will not initiate compaction for " + ci.getFullPartitionName() + " since last " + MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " attempts to compact it failed."); + ci.errorMessage = "Compaction is not initiated since last " + + MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " consecutive compaction attempts failed)"; txnHandler.markFailed(ci); return false; } } catch (Throwable e) { - LOG.error("Caught exception while checking compaction eligibility " + - StringUtils.stringifyException(e)); + LOG.error("Caught exception while checking compaction eligibility.", e); + try { + ci.errorMessage = e.getMessage(); + txnHandler.markFailed(ci); + } catch (MetaException ex) { + LOG.error("Caught exception while marking compaction as failed.", e); + return false; + } } return true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 5aff71e0e9..383969a3a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -94,12 +94,12 @@ public void run() { // Make sure nothing escapes this run method and kills the metastore at large, // so wrap it in a big catch Throwable statement. CompactionHeartbeater heartbeater = null; + CompactionInfo ci = null; try { if (msc == null) { msc = HiveMetaStoreUtils.getHiveMetastoreClient(conf); } - final CompactionInfo ci = CompactionInfo.optionalCompactionInfoStructToInfo( - msc.findNextCompact(workerName)); + ci = CompactionInfo.optionalCompactionInfoStructToInfo(msc.findNextCompact(workerName)); LOG.debug("Processing compaction request " + ci); if (ci == null && !stop.get()) { @@ -211,10 +211,11 @@ public void run() { UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(), UserGroupInformation.getLoginUser()); final Partition fp = p; + final CompactionInfo fci = ci; ugi.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { - mr.run(conf, jobName.toString(), t, fp, sd, tblValidWriteIds, ci, su, msc, dir); + mr.run(conf, jobName.toString(), t, fp, sd, tblValidWriteIds, fci, su, msc, dir); return null; } }); @@ -234,16 +235,26 @@ public Object run() throws Exception { } catch (Throwable e) { LOG.error("Caught exception while trying to compact " + ci + ". 
Marking failed to avoid repeated failures, " + StringUtils.stringifyException(e)); + ci.errorMessage = e.getMessage(); msc.markFailed(CompactionInfo.compactionInfoToStruct(ci)); msc.abortTxns(Collections.singletonList(compactorTxnId)); } } catch (TException | IOException t) { LOG.error("Caught an exception in the main loop of compactor worker " + workerName + ", " + StringUtils.stringifyException(t)); - if (msc != null) { - msc.close(); + try { + if (msc != null && ci != null) { + ci.errorMessage = t.getMessage(); + msc.markFailed(CompactionInfo.compactionInfoToStruct(ci)); + } + } catch (TException e) { + LOG.error("Caught an exception while trying to mark compaction {} as failed: {}", ci, e); + } finally { + if (msc != null) { + msc.close(); + msc = null; + } } - msc = null; try { Thread.sleep(SLEEP_TIME); } catch (InterruptedException e) { diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java index e5895547e6..15fcfc0e35 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java @@ -231,18 +231,44 @@ public void testMarkCleaned() throws Exception { assertTrue(TxnHandler.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); } + @Test + public void testShowCompactions() throws Exception { + final String dbName = "foo"; + final String tableName = "bar"; + final String partitionName = "ds=today"; + CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setPartitionname(partitionName); + txnHandler.compact(rqst); + ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest()); + showCompactResponse.getCompacts().forEach(e -> { + assertEquals(dbName, e.getDbname()); + assertEquals(tableName, e.getTablename()); + assertEquals(partitionName, e.getPartitionname()); + assertEquals("initiated", e.getState()); + assertEquals(CompactionType.MINOR, e.getType()); + assertEquals(1, e.getId()); + }); + } + @Test public void testMarkFailed() throws Exception { - CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); - rqst.setPartitionname("ds=today"); + final String dbName = "foo"; + final String tableName = "bar"; + final String partitionName = "ds=today"; + final String workerId = "fred"; + final String status = "failed"; + final String errorMessage = "Dummy error"; + CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setPartitionname(partitionName); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean().size()); - CompactionInfo ci = txnHandler.findNextToCompact("fred"); + CompactionInfo ci = txnHandler.findNextToCompact(workerId); assertNotNull(ci); assertEquals(0, txnHandler.findReadyToClean().size()); + ci.errorMessage = errorMessage; txnHandler.markFailed(ci); - assertNull(txnHandler.findNextToCompact("fred")); + assertNull(txnHandler.findNextToCompact(workerId)); boolean failedCheck = txnHandler.checkFailedCompactions(ci); assertFalse(failedCheck); try { @@ -262,23 +288,37 @@ public void testMarkFailed() throws Exception { // Add more failed compactions so that the total is exactly COMPACTOR_INITIATOR_FAILED_THRESHOLD for (int i = 1 ; i < conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); i++) { - addFailedCompaction("foo", "bar", CompactionType.MINOR, "ds=today"); + 
addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); } // Now checkFailedCompactions() will return true assertTrue(txnHandler.checkFailedCompactions(ci)); - + // Check the output of show compactions + checkShowCompaction(dbName, tableName, partitionName, status, errorMessage); // Now add enough failed compactions to ensure purgeCompactionHistory() will attempt delete; // HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED is enough for this. // But we also want enough to tickle the code in TxnUtils.buildQueryWithINClauseStrings() // so that it produces multiple queries. For that we need at least 290. for (int i = 0 ; i < 300; i++) { - addFailedCompaction("foo", "bar", CompactionType.MINOR, "ds=today"); + addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); } + checkShowCompaction(dbName, tableName, partitionName, status, errorMessage); txnHandler.purgeCompactionHistory(); } + private void checkShowCompaction(String dbName, String tableName, String partition, + String status, String errorMessage) throws MetaException { + ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest()); + showCompactResponse.getCompacts().forEach(e -> { + assertEquals(dbName, e.getDbname()); + assertEquals(tableName, e.getTablename()); + assertEquals(partition, e.getPartitionname()); + assertEquals(status, e.getState()); + assertEquals(errorMessage, e.getErrorMessage()); + }); + } + private void addFailedCompaction(String dbName, String tableName, CompactionType type, - String partitionName) throws MetaException { + String partitionName, String errorMessage) throws MetaException { CompactionRequest rqst; CompactionInfo ci; rqst = new CompactionRequest(dbName, tableName, type); @@ -286,6 +326,7 @@ private void addFailedCompaction(String dbName, String tableName, CompactionType txnHandler.compact(rqst); ci = txnHandler.findNextToCompact("fred"); assertNotNull(ci); + ci.errorMessage = errorMessage; txnHandler.markFailed(ci); } diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out index 03c6724ec2..fd7ad23eaa 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out @@ -145,8 +145,8 @@ PREHOOK: query: show compactions PREHOOK: type: SHOW COMPACTIONS POSTHOOK: query: show compactions POSTHOOK: type: SHOW COMPACTIONS -CompactionId Database Table Partition Type State Hostname Worker Start Time Duration(ms) HadoopJobId -1 default partitioned_acid_table p=abc MINOR initiated --- --- --- --- --- +CompactionId Database Table Partition Type State Hostname Worker Start Time Duration(ms) HadoopJobId Error message +1 default partitioned_acid_table p=abc MINOR initiated --- --- --- --- --- --- PREHOOK: query: drop table partitioned_acid_table PREHOOK: type: DROPTABLE PREHOOK: Input: default@partitioned_acid_table diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java index 4aee45ce5f..31b6ed450b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java @@ -50,6 +50,7 @@ private static final org.apache.thrift.protocol.TField WORKER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("workerId", org.apache.thrift.protocol.TType.STRING, (short)10); private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.I64, (short)11); private static final org.apache.thrift.protocol.TField HIGHEST_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highestWriteId", org.apache.thrift.protocol.TType.I64, (short)12); + private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)13); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -69,6 +70,7 @@ private String workerId; // optional private long start; // optional private long highestWriteId; // optional + private String errorMessage; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -87,7 +89,8 @@ STATE((short)9, "state"), WORKER_ID((short)10, "workerId"), START((short)11, "start"), - HIGHEST_WRITE_ID((short)12, "highestWriteId"); + HIGHEST_WRITE_ID((short)12, "highestWriteId"), + ERROR_MESSAGE((short)13, "errorMessage"); private static final Map byName = new HashMap(); @@ -126,6 +129,8 @@ public static _Fields findByThriftId(int fieldId) { return START; case 12: // HIGHEST_WRITE_ID return HIGHEST_WRITE_ID; + case 13: // ERROR_MESSAGE + return ERROR_MESSAGE; default: return null; } @@ -171,7 +176,7 @@ public String getFieldName() { private static final int __START_ISSET_ID = 2; private static final int __HIGHESTWRITEID_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -199,6 +204,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.HIGHEST_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("highestWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionInfoStruct.class, metaDataMap); } @@ -253,6 +260,9 @@ public CompactionInfoStruct(CompactionInfoStruct other) { } 
this.start = other.start; this.highestWriteId = other.highestWriteId; + if (other.isSetErrorMessage()) { + this.errorMessage = other.errorMessage; + } } public CompactionInfoStruct deepCopy() { @@ -277,6 +287,7 @@ public void clear() { this.start = 0; setHighestWriteIdIsSet(false); this.highestWriteId = 0; + this.errorMessage = null; } public long getId() { @@ -559,6 +570,29 @@ public void setHighestWriteIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHESTWRITEID_ISSET_ID, value); } + public String getErrorMessage() { + return this.errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + + public void unsetErrorMessage() { + this.errorMessage = null; + } + + /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ + public boolean isSetErrorMessage() { + return this.errorMessage != null; + } + + public void setErrorMessageIsSet(boolean value) { + if (!value) { + this.errorMessage = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case ID: @@ -657,6 +691,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ERROR_MESSAGE: + if (value == null) { + unsetErrorMessage(); + } else { + setErrorMessage((String)value); + } + break; + } } @@ -698,6 +740,9 @@ public Object getFieldValue(_Fields field) { case HIGHEST_WRITE_ID: return getHighestWriteId(); + case ERROR_MESSAGE: + return getErrorMessage(); + } throw new IllegalStateException(); } @@ -733,6 +778,8 @@ public boolean isSet(_Fields field) { return isSetStart(); case HIGHEST_WRITE_ID: return isSetHighestWriteId(); + case ERROR_MESSAGE: + return isSetErrorMessage(); } throw new IllegalStateException(); } @@ -858,6 +905,15 @@ public boolean equals(CompactionInfoStruct that) { return false; } + boolean this_present_errorMessage = true && this.isSetErrorMessage(); + boolean that_present_errorMessage = true && that.isSetErrorMessage(); + if (this_present_errorMessage || that_present_errorMessage) { + if (!(this_present_errorMessage && that_present_errorMessage)) + return false; + if (!this.errorMessage.equals(that.errorMessage)) + return false; + } + return true; } @@ -925,6 +981,11 @@ public int hashCode() { if (present_highestWriteId) list.add(highestWriteId); + boolean present_errorMessage = true && (isSetErrorMessage()); + list.add(present_errorMessage); + if (present_errorMessage) + list.add(errorMessage); + return list.hashCode(); } @@ -1056,6 +1117,16 @@ public int compareTo(CompactionInfoStruct other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetErrorMessage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1171,6 +1242,16 @@ public String toString() { sb.append(this.highestWriteId); first = false; } + if (isSetErrorMessage()) { + if (!first) sb.append(", "); + sb.append("errorMessage:"); + if (this.errorMessage == null) { + sb.append("null"); + } else { + sb.append(this.errorMessage); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1328,6 +1409,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionInfoStruc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 13: // ERROR_MESSAGE + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.errorMessage = iprot.readString(); + struct.setErrorMessageIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1409,6 +1498,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionInfoStru oprot.writeI64(struct.highestWriteId); oprot.writeFieldEnd(); } + if (struct.errorMessage != null) { + if (struct.isSetErrorMessage()) { + oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); + oprot.writeString(struct.errorMessage); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1455,7 +1551,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetHighestWriteId()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetErrorMessage()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -1480,6 +1579,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetHighestWriteId()) { oprot.writeI64(struct.highestWriteId); } + if (struct.isSetErrorMessage()) { + oprot.writeString(struct.errorMessage); + } } @Override @@ -1493,7 +1595,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.setTablenameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -1526,6 +1628,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.highestWriteId = iprot.readI64(); struct.setHighestWriteIdIsSet(true); } + if (incoming.get(8)) { + struct.errorMessage = iprot.readString(); + struct.setErrorMessageIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java index 8a5682a013..ea5c47eecc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java @@ -51,6 +51,7 @@ private static final org.apache.thrift.protocol.TField END_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("endTime", org.apache.thrift.protocol.TType.I64, (short)11); private static final org.apache.thrift.protocol.TField HADOOP_JOB_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("hadoopJobId", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)13); + private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)14); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static 
{ @@ -71,6 +72,7 @@ private long endTime; // optional private String hadoopJobId; // optional private long id; // optional + private String errorMessage; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -90,7 +92,8 @@ META_INFO((short)10, "metaInfo"), END_TIME((short)11, "endTime"), HADOOP_JOB_ID((short)12, "hadoopJobId"), - ID((short)13, "id"); + ID((short)13, "id"), + ERROR_MESSAGE((short)14, "errorMessage"); private static final Map byName = new HashMap(); @@ -131,6 +134,8 @@ public static _Fields findByThriftId(int fieldId) { return HADOOP_JOB_ID; case 13: // ID return ID; + case 14: // ERROR_MESSAGE + return ERROR_MESSAGE; default: return null; } @@ -176,7 +181,7 @@ public String getFieldName() { private static final int __ENDTIME_ISSET_ID = 2; private static final int __ID_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -206,6 +211,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactResponseElement.class, metaDataMap); } @@ -264,6 +271,9 @@ public ShowCompactResponseElement(ShowCompactResponseElement other) { this.hadoopJobId = other.hadoopJobId; } this.id = other.id; + if (other.isSetErrorMessage()) { + this.errorMessage = other.errorMessage; + } } public ShowCompactResponseElement deepCopy() { @@ -290,6 +300,7 @@ public void clear() { setIdIsSet(false); this.id = 0; + this.errorMessage = null; } public String getDbname() { @@ -595,6 +606,29 @@ public void setIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value); } + public String getErrorMessage() { + return this.errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + + public void unsetErrorMessage() { + this.errorMessage = null; + } + + /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ + public boolean isSetErrorMessage() { + return this.errorMessage != null; + } + + public void setErrorMessageIsSet(boolean value) { + if (!value) { + this.errorMessage = null; + } + } + public void setFieldValue(_Fields field, 
Object value) { switch (field) { case DBNAME: @@ -701,6 +735,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ERROR_MESSAGE: + if (value == null) { + unsetErrorMessage(); + } else { + setErrorMessage((String)value); + } + break; + } } @@ -745,6 +787,9 @@ public Object getFieldValue(_Fields field) { case ID: return getId(); + case ERROR_MESSAGE: + return getErrorMessage(); + } throw new IllegalStateException(); } @@ -782,6 +827,8 @@ public boolean isSet(_Fields field) { return isSetHadoopJobId(); case ID: return isSetId(); + case ERROR_MESSAGE: + return isSetErrorMessage(); } throw new IllegalStateException(); } @@ -916,6 +963,15 @@ public boolean equals(ShowCompactResponseElement that) { return false; } + boolean this_present_errorMessage = true && this.isSetErrorMessage(); + boolean that_present_errorMessage = true && that.isSetErrorMessage(); + if (this_present_errorMessage || that_present_errorMessage) { + if (!(this_present_errorMessage && that_present_errorMessage)) + return false; + if (!this.errorMessage.equals(that.errorMessage)) + return false; + } + return true; } @@ -988,6 +1044,11 @@ public int hashCode() { if (present_id) list.add(id); + boolean present_errorMessage = true && (isSetErrorMessage()); + list.add(present_errorMessage); + if (present_errorMessage) + list.add(errorMessage); + return list.hashCode(); } @@ -1129,6 +1190,16 @@ public int compareTo(ShowCompactResponseElement other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetErrorMessage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1254,6 +1325,16 @@ public String toString() { sb.append(this.id); first = false; } + if (isSetErrorMessage()) { + if (!first) sb.append(", "); + sb.append("errorMessage:"); + if (this.errorMessage == null) { + sb.append("null"); + } else { + sb.append(this.errorMessage); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1419,6 +1500,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 14: // ERROR_MESSAGE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.errorMessage = iprot.readString(); + struct.setErrorMessageIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1507,6 +1596,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeI64(struct.id); oprot.writeFieldEnd(); } + if (struct.errorMessage != null) { + if (struct.isSetErrorMessage()) { + oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); + oprot.writeString(struct.errorMessage); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1556,7 +1652,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetId()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetErrorMessage()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -1584,6 +1683,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetId()) { oprot.writeI64(struct.id); } + if (struct.isSetErrorMessage()) { + oprot.writeString(struct.errorMessage); + } } @Override @@ -1597,7 +1699,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.setTypeIsSet(true); struct.state = iprot.readString(); struct.setStateIsSet(true); - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -1634,6 +1736,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.id = iprot.readI64(); struct.setIdIsSet(true); } + if (incoming.get(9)) { + struct.errorMessage = iprot.readString(); + struct.setErrorMessageIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index 093ad4be27..b6ed664002 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -22369,6 +22369,10 @@ class CompactionInfoStruct { * @var int */ public $highestWriteId = null; + /** + * @var string + */ + public $errorMessage = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -22421,6 +22425,10 @@ class CompactionInfoStruct { 'var' => 'highestWriteId', 'type' => TType::I64, ), + 13 => array( + 'var' => 'errorMessage', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -22460,6 +22468,9 @@ class CompactionInfoStruct { if (isset($vals['highestWriteId'])) { $this->highestWriteId = $vals['highestWriteId']; } + if (isset($vals['errorMessage'])) { + $this->errorMessage = $vals['errorMessage']; + } } } @@ -22566,6 +22577,13 @@ class CompactionInfoStruct { $xfer += $input->skip($ftype); } break; + case 13: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->errorMessage); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -22639,6 +22657,11 @@ class CompactionInfoStruct { $xfer += $output->writeI64($this->highestWriteId); $xfer += $output->writeFieldEnd(); } + if ($this->errorMessage !== null) { + $xfer += $output->writeFieldBegin('errorMessage', TType::STRING, 13); + $xfer += $output->writeString($this->errorMessage); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -22952,6 +22975,10 @@ class ShowCompactResponseElement { * @var int */ public $id = null; + /** + * @var string + */ + public $errorMessage = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -23008,6 +23035,10 @@ class ShowCompactResponseElement { 'var' => 'id', 'type' => TType::I64, ), + 14 => array( + 'var' => 'errorMessage', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -23050,6 +23081,9 @@ class ShowCompactResponseElement { if (isset($vals['id'])) { $this->id = $vals['id']; } + if (isset($vals['errorMessage'])) { + $this->errorMessage = $vals['errorMessage']; + } } } @@ -23163,6 +23197,13 @@ class ShowCompactResponseElement { $xfer += $input->skip($ftype); } break; + case 14: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->errorMessage); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += 
$input->skip($ftype); break; @@ -23241,6 +23282,11 @@ class ShowCompactResponseElement { $xfer += $output->writeI64($this->id); $xfer += $output->writeFieldEnd(); } + if ($this->errorMessage !== null) { + $xfer += $output->writeFieldBegin('errorMessage', TType::STRING, 14); + $xfer += $output->writeString($this->errorMessage); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 0dcca59b68..a68392f9d5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -15551,6 +15551,7 @@ class CompactionInfoStruct: - workerId - start - highestWriteId + - errorMessage """ thrift_spec = ( @@ -15567,9 +15568,10 @@ class CompactionInfoStruct: (10, TType.STRING, 'workerId', None, None, ), # 10 (11, TType.I64, 'start', None, None, ), # 11 (12, TType.I64, 'highestWriteId', None, None, ), # 12 + (13, TType.STRING, 'errorMessage', None, None, ), # 13 ) - def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None,): + def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None,): self.id = id self.dbname = dbname self.tablename = tablename @@ -15582,6 +15584,7 @@ def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, typ self.workerId = workerId self.start = start self.highestWriteId = highestWriteId + self.errorMessage = errorMessage def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15652,6 +15655,11 @@ def read(self, iprot): self.highestWriteId = iprot.readI64() else: iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRING: + self.errorMessage = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15710,6 +15718,10 @@ def write(self, oprot): oprot.writeFieldBegin('highestWriteId', TType.I64, 12) oprot.writeI64(self.highestWriteId) oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin('errorMessage', TType.STRING, 13) + oprot.writeString(self.errorMessage) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15739,6 +15751,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.workerId) value = (value * 31) ^ hash(self.start) value = (value * 31) ^ hash(self.highestWriteId) + value = (value * 31) ^ hash(self.errorMessage) return value def __repr__(self): @@ -15977,6 +15990,7 @@ class ShowCompactResponseElement: - endTime - hadoopJobId - id + - errorMessage """ thrift_spec = ( @@ -15994,9 +16008,10 @@ class ShowCompactResponseElement: (11, TType.I64, 'endTime', None, None, ), # 11 (12, TType.STRING, 'hadoopJobId', None, "None", ), # 12 (13, TType.I64, 'id', None, None, ), # 13 + (14, TType.STRING, 'errorMessage', None, None, ), # 14 ) - def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, 
start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None,): + def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None, errorMessage=None,): self.dbname = dbname self.tablename = tablename self.partitionname = partitionname @@ -16010,6 +16025,7 @@ def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, s self.endTime = endTime self.hadoopJobId = hadoopJobId self.id = id + self.errorMessage = errorMessage def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -16085,6 +16101,11 @@ def read(self, iprot): self.id = iprot.readI64() else: iprot.skip(ftype) + elif fid == 14: + if ftype == TType.STRING: + self.errorMessage = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16147,6 +16168,10 @@ def write(self, oprot): oprot.writeFieldBegin('id', TType.I64, 13) oprot.writeI64(self.id) oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin('errorMessage', TType.STRING, 14) + oprot.writeString(self.errorMessage) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16177,6 +16202,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.endTime) value = (value * 31) ^ hash(self.hadoopJobId) value = (value * 31) ^ hash(self.id) + value = (value * 31) ^ hash(self.errorMessage) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index 8d7c32a765..aaca2fa4e5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3466,6 +3466,7 @@ class CompactionInfoStruct WORKERID = 10 START = 11 HIGHESTWRITEID = 12 + ERRORMESSAGE = 13 FIELDS = { ID => {:type => ::Thrift::Types::I64, :name => 'id'}, @@ -3479,7 +3480,8 @@ class CompactionInfoStruct STATE => {:type => ::Thrift::Types::STRING, :name => 'state', :optional => true}, WORKERID => {:type => ::Thrift::Types::STRING, :name => 'workerId', :optional => true}, START => {:type => ::Thrift::Types::I64, :name => 'start', :optional => true}, - HIGHESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'highestWriteId', :optional => true} + HIGHESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'highestWriteId', :optional => true}, + ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true} } def struct_fields; FIELDS; end @@ -3566,6 +3568,7 @@ class ShowCompactResponseElement ENDTIME = 11 HADOOPJOBID = 12 ID = 13 + ERRORMESSAGE = 14 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -3580,7 +3583,8 @@ class ShowCompactResponseElement METAINFO => {:type => ::Thrift::Types::STRING, :name => 'metaInfo', :optional => true}, ENDTIME => {:type => ::Thrift::Types::I64, :name => 'endTime', :optional => true}, HADOOPJOBID => {:type => ::Thrift::Types::STRING, :name => 'hadoopJobId', :default => %q"None", :optional => true}, - ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true} + ID => {:type => ::Thrift::Types::I64, 
:name => 'id', :optional => true}, + ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 72ccdd1a0f..fbbe70f4dd 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -1155,6 +1155,7 @@ struct CompactionInfoStruct { 10: optional string workerId 11: optional i64 start 12: optional i64 highestWriteId + 13: optional string errorMessage } struct OptionalCompactionInfoStruct { @@ -1184,6 +1185,7 @@ struct ShowCompactResponseElement { 11: optional i64 endTime, 12: optional string hadoopJobId = "None", 13: optional i64 id, + 14: optional string errorMessage, } struct ShowCompactResponse { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index ba45f39452..bf91ae704c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -59,6 +59,7 @@ public long highestWriteId; byte[] metaInfo; String hadoopJobId; + public String errorMessage; private String fullPartitionName = null; private String fullTableName = null; @@ -117,7 +118,8 @@ public String toString() { "properties:" + properties + "," + "runAs:" + runAs + "," + "tooManyAborts:" + tooManyAborts + "," + - "highestWriteId:" + highestWriteId; + "highestWriteId:" + highestWriteId + "," + + "errorMessage:" + errorMessage; } @Override @@ -159,6 +161,7 @@ static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLExcept fullCi.highestWriteId = rs.getLong(11); fullCi.metaInfo = rs.getBytes(12); fullCi.hadoopJobId = rs.getString(13); + fullCi.errorMessage = rs.getString(14); return fullCi; } static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionInfo ci, long endTime) throws SQLException { @@ -176,6 +179,7 @@ static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionIn pStmt.setLong(12, ci.highestWriteId); pStmt.setBytes(13, ci.metaInfo); pStmt.setString(14, ci.hadoopJobId); + pStmt.setString(15, ci.errorMessage); } public static CompactionInfo compactionStructToInfo(CompactionInfoStruct cr) { @@ -201,6 +205,9 @@ public static CompactionInfo compactionStructToInfo(CompactionInfoStruct cr) { if (cr.isSetHighestWriteId()) { ci.highestWriteId = cr.getHighestWriteId(); } + if (cr.isSetErrorMessage()) { + ci.errorMessage = cr.getErrorMessage(); + } return ci; } @@ -217,6 +224,7 @@ public static CompactionInfoStruct compactionInfoToStruct(CompactionInfo ci) { cr.setState(Character.toString(ci.state)); cr.setWorkerId(ci.workerId); cr.setHighestWriteId(ci.highestWriteId); + cr.setErrorMessage(ci.errorMessage); return cr; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index aded6f5486..bae23f773e 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -369,7 +369,9 @@ public void markCleaned(CompactionInfo info) throws MetaException { ResultSet rs = null; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, " + + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, " + + "CQ_HADOOP_JOB_ID, CQ_ERROR_MESSAGE from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, info.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -389,7 +391,10 @@ public void markCleaned(CompactionInfo info) throws MetaException { LOG.debug("Going to rollback"); dbConn.rollback(); } - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, " + + "CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, " + + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, CC_ERROR_MESSAGE) " + + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)"); info.state = SUCCEEDED_STATE; CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn)); updCount = pStmt.executeUpdate(); @@ -1051,10 +1056,15 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho Statement stmt = null; PreparedStatement pStmt = null; ResultSet rs = null; + // Save the error message from the caller's CompactionInfo up front: ci is + // overwritten below when the full record is re-loaded from COMPACTION_QUEUE. + String errorMessage = ci.errorMessage; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, " + + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, " + + "CQ_HADOOP_JOB_ID, CQ_ERROR_MESSAGE from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, ci.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -1088,7 +1098,13 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho close(rs, stmt, null); closeStmt(pStmt); - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, " + + "CC_PARTITION, CC_STATE, 
CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, " + + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, CC_ERROR_MESSAGE) " + + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)"; + if (errorMessage != null) { + ci.errorMessage = errorMessage; + } CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn)); int updCount = pStmt.executeUpdate(); LOG.debug("Going to commit"); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index da5dd61d08..7f1c2f8bdc 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -153,7 +153,8 @@ public static synchronized void prepDb(Configuration conf) throws Exception { " CQ_RUN_AS varchar(128)," + " CQ_HIGHEST_WRITE_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + - " CQ_HADOOP_JOB_ID varchar(32))"); + " CQ_HADOOP_JOB_ID varchar(32)," + + " CQ_ERROR_MESSAGE clob)"); stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)"); stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)"); @@ -172,7 +173,8 @@ public static synchronized void prepDb(Configuration conf) throws Exception { " CC_RUN_AS varchar(128)," + " CC_HIGHEST_WRITE_ID bigint," + " CC_META_INFO varchar(2048) for bit data," + - " CC_HADOOP_JOB_ID varchar(32))"); + " CC_HADOOP_JOB_ID varchar(32)," + + " CC_ERROR_MESSAGE clob)"); stmt.execute("CREATE TABLE AUX_TABLE (" + " MT_KEY1 varchar(128) NOT NULL," + diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index aa62ca2120..7e76668f03 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -3193,7 +3193,8 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep //-1 because 'null' literal doesn't work for all DBs... - "\"CQ_START\", -1 \"CC_END\", \"CQ_RUN_AS\", \"CQ_HADOOP_JOB_ID\", \"CQ_ID\" FROM \"COMPACTION_QUEUE\" UNION ALL " + + "\"CQ_START\", -1 \"CC_END\", \"CQ_RUN_AS\", \"CQ_HADOOP_JOB_ID\", \"CQ_ID\", \"CQ_ERROR_MESSAGE\" FROM \"COMPACTION_QUEUE\" UNION ALL " + "SELECT \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " + - "\"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\" FROM \"COMPLETED_COMPACTIONS\""; //todo: sort by cq_id? + "\"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\"" + " FROM \"COMPLETED_COMPACTIONS\""; //todo: sort by cq_id? 
//what I want is order by cc_end desc, cc_start asc (but derby has a bug https://issues.apache.org/jira/browse/DERBY-6013) //to sort so that currently running jobs are at the end of the list (bottom of screen) //and currently running ones are in sorted by start time @@ -3224,6 +3225,7 @@ public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaExcep e.setRunAs(rs.getString(9)); e.setHadoopJobId(rs.getString(10)); e.setId(rs.getLong(11)); + e.setErrorMessage(rs.getString(12)); response.addToCompacts(e); } LOG.debug("Going to rollback"); diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 6710271886..3be5707fa0 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -587,7 +587,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_RUN_AS varchar(128), CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varchar(2048) for bit data, - CQ_HADOOP_JOB_ID varchar(32) + CQ_HADOOP_JOB_ID varchar(32), + CQ_ERROR_MESSAGE clob ); CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( @@ -609,7 +610,8 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_RUN_AS varchar(128), CC_HIGHEST_WRITE_ID bigint, CC_META_INFO varchar(2048) for bit data, - CC_HADOOP_JOB_ID varchar(32) + CC_HADOOP_JOB_ID varchar(32), + CC_ERROR_MESSAGE clob ); CREATE TABLE AUX_TABLE ( diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql index ae0a32541a..a7d8da4e1c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql @@ -48,6 +48,11 @@ CREATE INDEX LASTUPDATETIMEINDEX ON APP.SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME); CREATE INDEX SCHEDULED_EXECUTIONS_SCHQID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); CREATE UNIQUE INDEX SCHEDULED_EXECUTIONS_UNIQUE_ID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); +-- HIVE-22729 +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_ERROR_MESSAGE clob; +ALTER TABLE COMPLETED_COMPACTIONS ADD COLUMN CC_ERROR_MESSAGE clob; + -- This needs to be the last thing done. Insert any changes above this line. 
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; + diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 221d4f1fff..29b7b3f746 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -1012,6 +1012,7 @@ CREATE TABLE COMPACTION_QUEUE( CQ_HIGHEST_WRITE_ID bigint NULL, CQ_META_INFO varbinary(2048) NULL, CQ_HADOOP_JOB_ID nvarchar(128) NULL, + CQ_ERROR_MESSAGE varchar(max) NULL, PRIMARY KEY CLUSTERED ( CQ_ID ASC @@ -1033,6 +1034,7 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_HIGHEST_WRITE_ID bigint NULL, CC_META_INFO varbinary(2048) NULL, CC_HADOOP_JOB_ID nvarchar(128) NULL, + CC_ERROR_MESSAGE varchar(max) NULL, PRIMARY KEY CLUSTERED ( CC_ID ASC diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql index bc98d5fc4a..72733c98ba 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql @@ -22,6 +22,10 @@ UPDATE TAB_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; ALTER TABLE PART_COL_STATS ADD ENGINE nvarchar(128); UPDATE PART_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; +-- HIVE-22729 +ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE varchar(max) NULL; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE varchar(max) NULL; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index dd761a66db..69e2cef57d 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1070,7 +1070,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_RUN_AS varchar(128), CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varbinary(2048), - CQ_HADOOP_JOB_ID varchar(32) + CQ_HADOOP_JOB_ID varchar(32), + CQ_ERROR_MESSAGE mediumtext ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE COMPLETED_COMPACTIONS ( @@ -1087,7 +1088,8 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_RUN_AS varchar(128), CC_HIGHEST_WRITE_ID bigint, CC_META_INFO varbinary(2048), - CC_HADOOP_JOB_ID varchar(32) + CC_HADOOP_JOB_ID varchar(32), + CC_ERROR_MESSAGE mediumtext ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql index 6a040a6a64..c81d08a324 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql @@ -52,7 +52,10 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON SCHEDULED_EXECUTIONS ( CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); +-- HIVE-22729 +ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE mediumtext; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE mediumtext; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; - diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index f5ec1ba1af..cb95a42499 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -1051,7 +1051,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_RUN_AS varchar(128), CQ_HIGHEST_WRITE_ID NUMBER(19), CQ_META_INFO BLOB, - CQ_HADOOP_JOB_ID varchar2(32) + CQ_HADOOP_JOB_ID varchar2(32), + CQ_ERROR_MESSAGE CLOB ) ROWDEPENDENCIES; CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( @@ -1073,7 +1074,8 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_RUN_AS varchar(128), CC_HIGHEST_WRITE_ID NUMBER(19), CC_META_INFO BLOB, - CC_HADOOP_JOB_ID varchar2(32) + CC_HADOOP_JOB_ID varchar2(32), + CC_ERROR_MESSAGE CLOB ) ROWDEPENDENCIES; CREATE TABLE AUX_TABLE ( diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql index c7738be273..65057be688 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql @@ -52,7 +52,10 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" ( CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); +-- HIVE-22729 +ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE CLOB; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE CLOB; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual; - diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index 455f98b725..0fcb88aeb3 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1737,7 +1737,8 @@ CREATE TABLE "COMPACTION_QUEUE" ( "CQ_RUN_AS" varchar(128), "CQ_HIGHEST_WRITE_ID" bigint, "CQ_META_INFO" bytea, - "CQ_HADOOP_JOB_ID" varchar(32) + "CQ_HADOOP_JOB_ID" varchar(32), + "CQ_ERROR_MESSAGE" text ); CREATE TABLE "NEXT_COMPACTION_QUEUE_ID" ( @@ -1759,7 +1760,8 @@ CREATE TABLE "COMPLETED_COMPACTIONS" ( "CC_RUN_AS" varchar(128), "CC_HIGHEST_WRITE_ID" bigint, "CC_META_INFO" bytea, - "CC_HADOOP_JOB_ID" varchar(32) + "CC_HADOOP_JOB_ID" varchar(32), + "CC_ERROR_MESSAGE" text ); CREATE TABLE "AUX_TABLE" ( diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql index 5c39b0d9f4..2347c697db 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql @@ -183,6 +183,9 @@ ALTER TABLE "WRITE_SET" RENAME COLUMN ws_txnid TO "WS_TXNID"; ALTER TABLE "WRITE_SET" RENAME COLUMN ws_commit_id TO "WS_COMMIT_ID"; ALTER TABLE "WRITE_SET" RENAME COLUMN ws_operation_type TO "WS_OPERATION_TYPE"; +-- HIVE-22729 +ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_ERROR_MESSAGE" text; +ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CC_ERROR_MESSAGE" text; -- These lines need to be last. Insert any changes above. UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
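
Reviewer note, not part of the patch: a minimal Java sketch of how the new optional errorMessage Thrift field surfaces to a metastore client once this change is applied. The helper class and method names below are illustrative only, and the client is assumed to be already connected (for example a HiveMetaStoreClient); only showCompactions() and the generated accessors come from this patch.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
    import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
    import org.apache.thrift.TException;

    public class CompactionFailureReport {
      // Prints id, state and failure reason for every compaction the metastore tracks.
      // 'msc' is assumed to be an already-connected client; obtaining and closing it
      // is out of scope for this sketch.
      static void printFailureReasons(IMetaStoreClient msc) throws TException {
        ShowCompactResponse resp = msc.showCompactions();
        for (ShowCompactResponseElement e : resp.getCompacts()) {
          // errorMessage is an optional Thrift field: check isSetErrorMessage() before
          // reading it, mirroring how ShowCompactionsOperation prints "---" when unset.
          String err = e.isSetErrorMessage() ? e.getErrorMessage() : "---";
          System.out.println(e.getId() + "\t" + e.getState() + "\t" + err);
        }
      }
    }

From HiveQL, the same value appears as the new trailing "Error message" column of SHOW COMPACTIONS, as the updated dbtxnmgr_showlocks.q.out output above shows.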