diff --git cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index 3cdedba..b3cee11 100644
--- cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -100,6 +100,7 @@ public CliDriver() {
   }
 
   public int processCmd(String cmd) {
+    CliSessionState ss = (CliSessionState) SessionState.get();
     ss.setLastCommand(cmd);
     // Flush the print stream, so it doesn't include output from the last command
@@ -169,6 +170,7 @@ public int processCmd(String cmd) {
         }
       }
     } else if (ss.isRemoteMode()) { // remote mode -- connecting to remote hive server
+      HiveClient client = ss.getClient();
       PrintStream out = ss.out;
       PrintStream err = ss.err;
@@ -245,6 +247,7 @@ private String getFirstCmd(String cmd, int length) {
   }
 
   int processLocalCmd(String cmd, CommandProcessor proc, CliSessionState ss) {
+    int tryCount = 0;
     boolean needRetry;
     int ret = 0;
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index ba5b8a9..653f5cc 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -1,5 +1,7 @@
-
+-->
+
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 130fd67..29fee24 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -1473,8 +1473,8 @@ public void testColumnStatistics() throws Throwable {
     assertEquals(colStats2.getColName(), colName[0]);
     assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue);
     assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue);
-    assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
-    assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
+    assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls().longValue(), numNulls);
+    assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs().longValue(), numDVs);
 
     // test delete column stats; if no col name is passed all column stats associated with the
     // table is deleted
@@ -1531,10 +1531,10 @@ public void testColumnStatistics() throws Throwable {
     assertNotNull(colStats2);
     assertEquals(colStats.getStatsDesc().getPartName(), partName);
     assertEquals(colStats2.getColName(), colName[1]);
-    assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
+    assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen().longValue(), maxColLen);
     assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen);
-    assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
-    assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
+    assertEquals(colStats2.getStatsData().getStringStats().getNumNulls().longValue(), numNulls);
+    assertEquals(colStats2.getStatsData().getStringStats().getNumDVs().longValue(), numDVs);
 
     // test stats deletion at partition level
     client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
index 1516b25..e946432 100644
---
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java @@ -31,543 +31,624 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class BinaryColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BinaryColumnStatsData"); - - private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField AVG_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("avgColLen", org.apache.thrift.protocol.TType.DOUBLE, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new BinaryColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new BinaryColumnStatsDataTupleSchemeFactory()); - } - - private long maxColLen; // required - private double avgColLen; // required - private long numNulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - MAX_COL_LEN((short)1, "maxColLen"), - AVG_COL_LEN((short)2, "avgColLen"), - NUM_NULLS((short)3, "numNulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // MAX_COL_LEN - return MAX_COL_LEN; - case 2: // AVG_COL_LEN - return AVG_COL_LEN; - case 3: // NUM_NULLS - return NUM_NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __MAXCOLLEN_ISSET_ID = 0; - private static final int __AVGCOLLEN_ISSET_ID = 1; - private static final int __NUMNULLS_ISSET_ID = 2; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.MAX_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("maxColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.AVG_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("avgColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BinaryColumnStatsData.class, metaDataMap); - } - - public BinaryColumnStatsData() { - } - - public BinaryColumnStatsData( - long maxColLen, - double avgColLen, - long numNulls) - { - this(); - this.maxColLen = maxColLen; - setMaxColLenIsSet(true); - this.avgColLen = avgColLen; - setAvgColLenIsSet(true); - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public BinaryColumnStatsData(BinaryColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - this.maxColLen = other.maxColLen; - this.avgColLen = other.avgColLen; - this.numNulls = other.numNulls; - } - - public BinaryColumnStatsData deepCopy() { - return new BinaryColumnStatsData(this); - } - - @Override - public void clear() { - setMaxColLenIsSet(false); - this.maxColLen = 0; - setAvgColLenIsSet(false); - this.avgColLen = 0.0; - setNumNullsIsSet(false); - this.numNulls = 0; - } - - public long getMaxColLen() { - return this.maxColLen; - } - - public void setMaxColLen(long maxColLen) { - this.maxColLen = maxColLen; - setMaxColLenIsSet(true); - } - - public void unsetMaxColLen() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); - } - - /** Returns true if field maxColLen is set (has been assigned a value) and false otherwise */ - public boolean isSetMaxColLen() { - return EncodingUtils.testBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); - } - - public void setMaxColLenIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID, value); - } - - public double getAvgColLen() { - return this.avgColLen; - } - - public void setAvgColLen(double avgColLen) { - this.avgColLen = avgColLen; - setAvgColLenIsSet(true); - } - - public void unsetAvgColLen() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); - } - - /** Returns true if field avgColLen is set (has been assigned a value) and false otherwise */ - public boolean isSetAvgColLen() { - return EncodingUtils.testBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); - } - - public void setAvgColLenIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID, value); - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case MAX_COL_LEN: - if (value == null) { - unsetMaxColLen(); - } else { - setMaxColLen((Long)value); - } - break; - - case AVG_COL_LEN: - if (value == null) { - unsetAvgColLen(); - } else { - setAvgColLen((Double)value); - } - break; - - case NUM_NULLS: - if (value == null) { - unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case MAX_COL_LEN: - return Long.valueOf(getMaxColLen()); - - case AVG_COL_LEN: - return Double.valueOf(getAvgColLen()); - - case NUM_NULLS: - return Long.valueOf(getNumNulls()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case MAX_COL_LEN: - return isSetMaxColLen(); - case AVG_COL_LEN: - return isSetAvgColLen(); - case NUM_NULLS: - return 
isSetNumNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof BinaryColumnStatsData) - return this.equals((BinaryColumnStatsData)that); - return false; - } - - public boolean equals(BinaryColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_maxColLen = true; - boolean that_present_maxColLen = true; - if (this_present_maxColLen || that_present_maxColLen) { - if (!(this_present_maxColLen && that_present_maxColLen)) - return false; - if (this.maxColLen != that.maxColLen) - return false; - } - - boolean this_present_avgColLen = true; - boolean that_present_avgColLen = true; - if (this_present_avgColLen || that_present_avgColLen) { - if (!(this_present_avgColLen && that_present_avgColLen)) - return false; - if (this.avgColLen != that.avgColLen) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_maxColLen = true; - builder.append(present_maxColLen); - if (present_maxColLen) - builder.append(maxColLen); - - boolean present_avgColLen = true; - builder.append(present_avgColLen); - if (present_avgColLen) - builder.append(avgColLen); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - return builder.toHashCode(); - } - - public int compareTo(BinaryColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - BinaryColumnStatsData typedOther = (BinaryColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetMaxColLen()).compareTo(typedOther.isSetMaxColLen()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMaxColLen()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxColLen, typedOther.maxColLen); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetAvgColLen()).compareTo(typedOther.isSetAvgColLen()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetAvgColLen()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.avgColLen, typedOther.avgColLen); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("BinaryColumnStatsData("); - 
boolean first = true; - - sb.append("maxColLen:"); - sb.append(this.maxColLen); - first = false; - if (!first) sb.append(", "); - sb.append("avgColLen:"); - sb.append(this.avgColLen); - first = false; - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetMaxColLen()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxColLen' is unset! Struct:" + toString()); - } - - if (!isSetAvgColLen()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'avgColLen' is unset! Struct:" + toString()); - } - - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class BinaryColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public BinaryColumnStatsDataStandardScheme getScheme() { - return new BinaryColumnStatsDataStandardScheme(); - } - } - - private static class BinaryColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, BinaryColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // MAX_COL_LEN - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.maxColLen = iprot.readI64(); - struct.setMaxColLenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // AVG_COL_LEN - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.avgColLen = iprot.readDouble(); - struct.setAvgColLenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, BinaryColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - 
oprot.writeFieldBegin(MAX_COL_LEN_FIELD_DESC); - oprot.writeI64(struct.maxColLen); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(AVG_COL_LEN_FIELD_DESC); - oprot.writeDouble(struct.avgColLen); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class BinaryColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public BinaryColumnStatsDataTupleScheme getScheme() { - return new BinaryColumnStatsDataTupleScheme(); - } - } - - private static class BinaryColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, BinaryColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.maxColLen); - oprot.writeDouble(struct.avgColLen); - oprot.writeI64(struct.numNulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, BinaryColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.maxColLen = iprot.readI64(); - struct.setMaxColLenIsSet(true); - struct.avgColLen = iprot.readDouble(); - struct.setAvgColLenIsSet(true); - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } - } +public class BinaryColumnStatsData + implements + org.apache.thrift.TBase, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "BinaryColumnStatsData"); + + private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField( + "maxColLen", org.apache.thrift.protocol.TType.I64, (short) 1); + private static final org.apache.thrift.protocol.TField AVG_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField( + "avgColLen", org.apache.thrift.protocol.TType.DOUBLE, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new BinaryColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new BinaryColumnStatsDataTupleSchemeFactory()); + } + + private long maxColLen; // required + private double avgColLen; // required + private long numNulls; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MAX_COL_LEN((short) 1, "maxColLen"), AVG_COL_LEN((short) 2, "avgColLen"), NUM_NULLS( + (short) 3, "numNulls"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // MAX_COL_LEN + return MAX_COL_LEN; + case 2: // AVG_COL_LEN + return AVG_COL_LEN; + case 3: // NUM_NULLS + return NUM_NULLS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __MAXCOLLEN_ISSET_ID = 0; + private static final int __AVGCOLLEN_ISSET_ID = 1; + private static final int __NUMNULLS_ISSET_ID = 2; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.MAX_COL_LEN, + new org.apache.thrift.meta_data.FieldMetaData("maxColLen", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.AVG_COL_LEN, + new org.apache.thrift.meta_data.FieldMetaData("avgColLen", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.DOUBLE))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + BinaryColumnStatsData.class, metaDataMap); + } + + public BinaryColumnStatsData() { + } + + public BinaryColumnStatsData(long maxColLen, double avgColLen, long numNulls) { + this(); + this.maxColLen = maxColLen; + setMaxColLenIsSet(true); + this.avgColLen = avgColLen; + setAvgColLenIsSet(true); + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public BinaryColumnStatsData(BinaryColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + this.maxColLen = other.maxColLen; + this.avgColLen = other.avgColLen; + this.numNulls = other.numNulls; + } + + public BinaryColumnStatsData deepCopy() { + return new BinaryColumnStatsData(this); + } + + @Override + public void clear() { + setMaxColLenIsSet(false); + // this.maxColLen = 0; + setAvgColLenIsSet(false); + // this.avgColLen = 0.0; + setNumNullsIsSet(false); + // this.numNulls = 0; + } + + public Long getMaxColLen() { + if (isSetMaxColLen()) + return this.maxColLen; + else + return null; + } + + public void setMaxColLen(long maxColLen) { + this.maxColLen = maxColLen; + setMaxColLenIsSet(true); + } + + public void unsetMaxColLen() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __MAXCOLLEN_ISSET_ID); + } + + /** + * Returns true if field maxColLen is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetMaxColLen() { + return EncodingUtils.testBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); + } + + public void setMaxColLenIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __MAXCOLLEN_ISSET_ID, value); + } + + public Double getAvgColLen() { + if (isSetAvgColLen()) + return this.avgColLen; + else + return null; + } + + public void setAvgColLen(double avgColLen) { + this.avgColLen = avgColLen; + setAvgColLenIsSet(true); + } + + public void unsetAvgColLen() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __AVGCOLLEN_ISSET_ID); + } + + /** + * Returns true if field avgColLen is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetAvgColLen() { + return EncodingUtils.testBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); + } + + public void setAvgColLenIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __AVGCOLLEN_ISSET_ID, value); + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MAX_COL_LEN: + if (value == null) { + unsetMaxColLen(); + } else { + setMaxColLen((Long) value); + } + break; + + case AVG_COL_LEN: + if (value == null) { + unsetAvgColLen(); + } else { + setAvgColLen((Double) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MAX_COL_LEN: + return Long.valueOf(getMaxColLen()); + + case AVG_COL_LEN: + return Double.valueOf(getAvgColLen()); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { 
+ throw new IllegalArgumentException(); + } + + switch (field) { + case MAX_COL_LEN: + return isSetMaxColLen(); + case AVG_COL_LEN: + return isSetAvgColLen(); + case NUM_NULLS: + return isSetNumNulls(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof BinaryColumnStatsData) + return this.equals((BinaryColumnStatsData) that); + return false; + } + + public boolean equals(BinaryColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_maxColLen = true; + boolean that_present_maxColLen = true; + if (this_present_maxColLen || that_present_maxColLen) { + if (!(this_present_maxColLen && that_present_maxColLen)) + return false; + if (this.maxColLen != that.maxColLen) + return false; + } + + boolean this_present_avgColLen = true; + boolean that_present_avgColLen = true; + if (this_present_avgColLen || that_present_avgColLen) { + if (!(this_present_avgColLen && that_present_avgColLen)) + return false; + if (this.avgColLen != that.avgColLen) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_maxColLen = true; + builder.append(present_maxColLen); + if (present_maxColLen) + builder.append(maxColLen); + + boolean present_avgColLen = true; + builder.append(present_avgColLen); + if (present_avgColLen) + builder.append(avgColLen); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + return builder.toHashCode(); + } + + public int compareTo(BinaryColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + BinaryColumnStatsData typedOther = (BinaryColumnStatsData) other; + + lastComparison = Boolean.valueOf(isSetMaxColLen()).compareTo( + typedOther.isSetMaxColLen()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxColLen()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.maxColLen, typedOther.maxColLen); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAvgColLen()).compareTo( + typedOther.isSetAvgColLen()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAvgColLen()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.avgColLen, typedOther.avgColLen); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws 
org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("BinaryColumnStatsData("); + boolean first = true; + + sb.append("maxColLen:"); + sb.append(this.maxColLen); + first = false; + if (!first) + sb.append(", "); + sb.append("avgColLen:"); + sb.append(this.avgColLen); + first = false; + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetMaxColLen()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'maxColLen' is unset! Struct:" + toString()); + } + + if (!isSetAvgColLen()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'avgColLen' is unset! Struct:" + toString()); + } + + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class BinaryColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public BinaryColumnStatsDataStandardScheme getScheme() { + return new BinaryColumnStatsDataStandardScheme(); + } + } + + private static class BinaryColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + BinaryColumnStatsData struct) + throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MAX_COL_LEN + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.maxColLen = iprot.readI64(); + struct.setMaxColLenIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // AVG_COL_LEN + if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { + struct.avgColLen = iprot.readDouble(); + struct.setAvgColLenIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + 
iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + BinaryColumnStatsData struct) + throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(MAX_COL_LEN_FIELD_DESC); + oprot.writeI64(struct.maxColLen); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(AVG_COL_LEN_FIELD_DESC); + oprot.writeDouble(struct.avgColLen); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class BinaryColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + public BinaryColumnStatsDataTupleScheme getScheme() { + return new BinaryColumnStatsDataTupleScheme(); + } + } + + private static class BinaryColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + BinaryColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.maxColLen); + oprot.writeDouble(struct.avgColLen); + oprot.writeI64(struct.numNulls); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + BinaryColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.maxColLen = iprot.readI64(); + struct.setMaxColLenIsSet(true); + struct.avgColLen = iprot.readDouble(); + struct.setAvgColLenIsSet(true); + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } + } } - diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java index 9ef9c0f..32658aa 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java @@ -31,543 +31,624 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class BooleanColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BooleanColumnStatsData"); - - private static final org.apache.thrift.protocol.TField NUM_TRUES_FIELD_DESC = new org.apache.thrift.protocol.TField("numTrues", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField NUM_FALSES_FIELD_DESC = new org.apache.thrift.protocol.TField("numFalses", org.apache.thrift.protocol.TType.I64, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new BooleanColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new BooleanColumnStatsDataTupleSchemeFactory()); - } - - private long numTrues; // required - private long numFalses; // required - private long numNulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NUM_TRUES((short)1, "numTrues"), - NUM_FALSES((short)2, "numFalses"), - NUM_NULLS((short)3, "numNulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // NUM_TRUES - return NUM_TRUES; - case 2: // NUM_FALSES - return NUM_FALSES; - case 3: // NUM_NULLS - return NUM_NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __NUMTRUES_ISSET_ID = 0; - private static final int __NUMFALSES_ISSET_ID = 1; - private static final int __NUMNULLS_ISSET_ID = 2; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NUM_TRUES, new org.apache.thrift.meta_data.FieldMetaData("numTrues", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_FALSES, new org.apache.thrift.meta_data.FieldMetaData("numFalses", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BooleanColumnStatsData.class, metaDataMap); - } - - public BooleanColumnStatsData() { - } - - public BooleanColumnStatsData( - long numTrues, - long numFalses, - long numNulls) - { - this(); - this.numTrues = numTrues; - setNumTruesIsSet(true); - this.numFalses = numFalses; - setNumFalsesIsSet(true); - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public BooleanColumnStatsData(BooleanColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - this.numTrues = other.numTrues; - this.numFalses = other.numFalses; - this.numNulls = other.numNulls; - } - - public BooleanColumnStatsData deepCopy() { - return new BooleanColumnStatsData(this); - } - - @Override - public void clear() { - setNumTruesIsSet(false); - this.numTrues = 0; - setNumFalsesIsSet(false); - this.numFalses = 0; - setNumNullsIsSet(false); - this.numNulls = 0; - } - - public long getNumTrues() { - return this.numTrues; - } - - public void setNumTrues(long numTrues) { - this.numTrues = numTrues; - setNumTruesIsSet(true); - } - - public void unsetNumTrues() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMTRUES_ISSET_ID); - } - - /** Returns true if field numTrues is set (has been assigned a value) and false otherwise */ - public boolean isSetNumTrues() { - return EncodingUtils.testBit(__isset_bitfield, __NUMTRUES_ISSET_ID); - } - - public void setNumTruesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMTRUES_ISSET_ID, value); - } - - public long getNumFalses() { - return this.numFalses; - } - - public void setNumFalses(long numFalses) { - this.numFalses = numFalses; - setNumFalsesIsSet(true); - } - - public void unsetNumFalses() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMFALSES_ISSET_ID); - } - - /** Returns true if field numFalses is set (has been assigned a value) and false otherwise */ - public boolean isSetNumFalses() { - return EncodingUtils.testBit(__isset_bitfield, __NUMFALSES_ISSET_ID); - } - - public void setNumFalsesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMFALSES_ISSET_ID, value); - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case NUM_TRUES: - if (value == null) { - unsetNumTrues(); - } else { - setNumTrues((Long)value); - } - break; - - case NUM_FALSES: - if (value == null) { - unsetNumFalses(); - } else { - setNumFalses((Long)value); - } - break; - - case NUM_NULLS: - if (value == null) { - unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case NUM_TRUES: - return Long.valueOf(getNumTrues()); - - case NUM_FALSES: - return Long.valueOf(getNumFalses()); - - case NUM_NULLS: - return Long.valueOf(getNumNulls()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case NUM_TRUES: - return isSetNumTrues(); - case NUM_FALSES: - return isSetNumFalses(); - case NUM_NULLS: - return isSetNumNulls(); - } - throw new 
IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof BooleanColumnStatsData) - return this.equals((BooleanColumnStatsData)that); - return false; - } - - public boolean equals(BooleanColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_numTrues = true; - boolean that_present_numTrues = true; - if (this_present_numTrues || that_present_numTrues) { - if (!(this_present_numTrues && that_present_numTrues)) - return false; - if (this.numTrues != that.numTrues) - return false; - } - - boolean this_present_numFalses = true; - boolean that_present_numFalses = true; - if (this_present_numFalses || that_present_numFalses) { - if (!(this_present_numFalses && that_present_numFalses)) - return false; - if (this.numFalses != that.numFalses) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_numTrues = true; - builder.append(present_numTrues); - if (present_numTrues) - builder.append(numTrues); - - boolean present_numFalses = true; - builder.append(present_numFalses); - if (present_numFalses) - builder.append(numFalses); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - return builder.toHashCode(); - } - - public int compareTo(BooleanColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - BooleanColumnStatsData typedOther = (BooleanColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetNumTrues()).compareTo(typedOther.isSetNumTrues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumTrues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numTrues, typedOther.numTrues); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumFalses()).compareTo(typedOther.isSetNumFalses()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumFalses()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numFalses, typedOther.numFalses); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("BooleanColumnStatsData("); - boolean first = true; - - 
sb.append("numTrues:"); - sb.append(this.numTrues); - first = false; - if (!first) sb.append(", "); - sb.append("numFalses:"); - sb.append(this.numFalses); - first = false; - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNumTrues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numTrues' is unset! Struct:" + toString()); - } - - if (!isSetNumFalses()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numFalses' is unset! Struct:" + toString()); - } - - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class BooleanColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public BooleanColumnStatsDataStandardScheme getScheme() { - return new BooleanColumnStatsDataStandardScheme(); - } - } - - private static class BooleanColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, BooleanColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // NUM_TRUES - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numTrues = iprot.readI64(); - struct.setNumTruesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NUM_FALSES - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numFalses = iprot.readI64(); - struct.setNumFalsesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, BooleanColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - 
oprot.writeFieldBegin(NUM_TRUES_FIELD_DESC); - oprot.writeI64(struct.numTrues); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_FALSES_FIELD_DESC); - oprot.writeI64(struct.numFalses); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class BooleanColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public BooleanColumnStatsDataTupleScheme getScheme() { - return new BooleanColumnStatsDataTupleScheme(); - } - } - - private static class BooleanColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, BooleanColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.numTrues); - oprot.writeI64(struct.numFalses); - oprot.writeI64(struct.numNulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, BooleanColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.numTrues = iprot.readI64(); - struct.setNumTruesIsSet(true); - struct.numFalses = iprot.readI64(); - struct.setNumFalsesIsSet(true); - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } - } +public class BooleanColumnStatsData + implements + org.apache.thrift.TBase, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "BooleanColumnStatsData"); + + private static final org.apache.thrift.protocol.TField NUM_TRUES_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numTrues", org.apache.thrift.protocol.TType.I64, (short) 1); + private static final org.apache.thrift.protocol.TField NUM_FALSES_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numFalses", org.apache.thrift.protocol.TType.I64, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new BooleanColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new BooleanColumnStatsDataTupleSchemeFactory()); + } + + private long numTrues; // required + private long numFalses; // required + private long numNulls; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NUM_TRUES((short) 1, "numTrues"), NUM_FALSES((short) 2, "numFalses"), NUM_NULLS( + (short) 3, "numNulls"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // NUM_TRUES + return NUM_TRUES; + case 2: // NUM_FALSES + return NUM_FALSES; + case 3: // NUM_NULLS + return NUM_NULLS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __NUMTRUES_ISSET_ID = 0; + private static final int __NUMFALSES_ISSET_ID = 1; + private static final int __NUMNULLS_ISSET_ID = 2; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.NUM_TRUES, + new org.apache.thrift.meta_data.FieldMetaData("numTrues", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_FALSES, + new org.apache.thrift.meta_data.FieldMetaData("numFalses", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + BooleanColumnStatsData.class, metaDataMap); + } + + public BooleanColumnStatsData() { + } + + public BooleanColumnStatsData(long numTrues, long numFalses, long numNulls) { + this(); + this.numTrues = numTrues; + setNumTruesIsSet(true); + this.numFalses = numFalses; + setNumFalsesIsSet(true); + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public BooleanColumnStatsData(BooleanColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + this.numTrues = other.numTrues; + this.numFalses = other.numFalses; + this.numNulls = other.numNulls; + } + + public BooleanColumnStatsData deepCopy() { + return new BooleanColumnStatsData(this); + } + + @Override + public void clear() { + setNumTruesIsSet(false); + // this.numTrues = 0; + setNumFalsesIsSet(false); + // this.numFalses = 0; + setNumNullsIsSet(false); + // this.numNulls = 0; + } + + public Long getNumTrues() { + if (isSetNumTrues()) + return this.numTrues; + else + return null; + } + + public void setNumTrues(long numTrues) { + this.numTrues = numTrues; + setNumTruesIsSet(true); + } + + public void unsetNumTrues() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMTRUES_ISSET_ID); + } + + /** + * Returns true if field numTrues is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumTrues() { + return EncodingUtils.testBit(__isset_bitfield, __NUMTRUES_ISSET_ID); + } + + public void setNumTruesIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMTRUES_ISSET_ID, value); + } + + public Long getNumFalses() { + if (isSetNumFalses()) + return this.numFalses; + else + return null; + } + + public void setNumFalses(long numFalses) { + this.numFalses = numFalses; + setNumFalsesIsSet(true); + } + + public void unsetNumFalses() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMFALSES_ISSET_ID); + } + + /** + * Returns true if field numFalses is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumFalses() { + return EncodingUtils.testBit(__isset_bitfield, __NUMFALSES_ISSET_ID); + } + + public void setNumFalsesIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMFALSES_ISSET_ID, value); + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NUM_TRUES: + if (value == null) { + unsetNumTrues(); + } else { + setNumTrues((Long) value); + } + break; + + case NUM_FALSES: + if (value == null) { + unsetNumFalses(); + } else { + setNumFalses((Long) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NUM_TRUES: + return Long.valueOf(getNumTrues()); + + case NUM_FALSES: + return Long.valueOf(getNumFalses()); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new 
IllegalArgumentException(); + } + + switch (field) { + case NUM_TRUES: + return isSetNumTrues(); + case NUM_FALSES: + return isSetNumFalses(); + case NUM_NULLS: + return isSetNumNulls(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof BooleanColumnStatsData) + return this.equals((BooleanColumnStatsData) that); + return false; + } + + public boolean equals(BooleanColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_numTrues = true; + boolean that_present_numTrues = true; + if (this_present_numTrues || that_present_numTrues) { + if (!(this_present_numTrues && that_present_numTrues)) + return false; + if (this.numTrues != that.numTrues) + return false; + } + + boolean this_present_numFalses = true; + boolean that_present_numFalses = true; + if (this_present_numFalses || that_present_numFalses) { + if (!(this_present_numFalses && that_present_numFalses)) + return false; + if (this.numFalses != that.numFalses) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_numTrues = true; + builder.append(present_numTrues); + if (present_numTrues) + builder.append(numTrues); + + boolean present_numFalses = true; + builder.append(present_numFalses); + if (present_numFalses) + builder.append(numFalses); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + return builder.toHashCode(); + } + + public int compareTo(BooleanColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + BooleanColumnStatsData typedOther = (BooleanColumnStatsData) other; + + lastComparison = Boolean.valueOf(isSetNumTrues()).compareTo( + typedOther.isSetNumTrues()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumTrues()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numTrues, typedOther.numTrues); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumFalses()).compareTo( + typedOther.isSetNumFalses()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumFalses()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numFalses, typedOther.numFalses); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws org.apache.thrift.TException 
{ + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("BooleanColumnStatsData("); + boolean first = true; + + sb.append("numTrues:"); + sb.append(this.numTrues); + first = false; + if (!first) + sb.append(", "); + sb.append("numFalses:"); + sb.append(this.numFalses); + first = false; + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetNumTrues()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numTrues' is unset! Struct:" + toString()); + } + + if (!isSetNumFalses()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numFalses' is unset! Struct:" + toString()); + } + + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class BooleanColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public BooleanColumnStatsDataStandardScheme getScheme() { + return new BooleanColumnStatsDataStandardScheme(); + } + } + + private static class BooleanColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + BooleanColumnStatsData struct) + throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NUM_TRUES + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numTrues = iprot.readI64(); + struct.setNumTruesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // NUM_FALSES + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numFalses = iprot.readI64(); + struct.setNumFalsesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + 
struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + BooleanColumnStatsData struct) + throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(NUM_TRUES_FIELD_DESC); + oprot.writeI64(struct.numTrues); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_FALSES_FIELD_DESC); + oprot.writeI64(struct.numFalses); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class BooleanColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + public BooleanColumnStatsDataTupleScheme getScheme() { + return new BooleanColumnStatsDataTupleScheme(); + } + } + + private static class BooleanColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + BooleanColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.numTrues); + oprot.writeI64(struct.numFalses); + oprot.writeI64(struct.numNulls); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + BooleanColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.numTrues = iprot.readI64(); + struct.setNumTruesIsSet(true); + struct.numFalses = iprot.readI64(); + struct.setNumFalsesIsSet(true); + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } + } } - diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java index 951d479..f639ca5 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java @@ -31,674 +31,762 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class DecimalColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DecimalColumnStatsData"); - - private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new DecimalColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new DecimalColumnStatsDataTupleSchemeFactory()); - } - - private Decimal lowValue; // optional - private Decimal highValue; // optional - private long numNulls; // required - private long numDVs; // 
required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - LOW_VALUE((short)1, "lowValue"), - HIGH_VALUE((short)2, "highValue"), - NUM_NULLS((short)3, "numNulls"), - NUM_DVS((short)4, "numDVs"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // LOW_VALUE - return LOW_VALUE; - case 2: // HIGH_VALUE - return HIGH_VALUE; - case 3: // NUM_NULLS - return NUM_NULLS; - case 4: // NUM_DVS - return NUM_DVS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __NUMNULLS_ISSET_ID = 0; - private static final int __NUMDVS_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Decimal.class))); - tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Decimal.class))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DecimalColumnStatsData.class, metaDataMap); - } - - public DecimalColumnStatsData() { - } - - public DecimalColumnStatsData( - long numNulls, - long numDVs) - { - this(); - this.numNulls = numNulls; - setNumNullsIsSet(true); - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - /** - * Performs a deep copy on 
other. - */ - public DecimalColumnStatsData(DecimalColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetLowValue()) { - this.lowValue = new Decimal(other.lowValue); - } - if (other.isSetHighValue()) { - this.highValue = new Decimal(other.highValue); - } - this.numNulls = other.numNulls; - this.numDVs = other.numDVs; - } - - public DecimalColumnStatsData deepCopy() { - return new DecimalColumnStatsData(this); - } - - @Override - public void clear() { - this.lowValue = null; - this.highValue = null; - setNumNullsIsSet(false); - this.numNulls = 0; - setNumDVsIsSet(false); - this.numDVs = 0; - } - - public Decimal getLowValue() { - return this.lowValue; - } - - public void setLowValue(Decimal lowValue) { - this.lowValue = lowValue; - } - - public void unsetLowValue() { - this.lowValue = null; - } - - /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */ - public boolean isSetLowValue() { - return this.lowValue != null; - } - - public void setLowValueIsSet(boolean value) { - if (!value) { - this.lowValue = null; - } - } - - public Decimal getHighValue() { - return this.highValue; - } - - public void setHighValue(Decimal highValue) { - this.highValue = highValue; - } - - public void unsetHighValue() { - this.highValue = null; - } - - /** Returns true if field highValue is set (has been assigned a value) and false otherwise */ - public boolean isSetHighValue() { - return this.highValue != null; - } - - public void setHighValueIsSet(boolean value) { - if (!value) { - this.highValue = null; - } - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public long getNumDVs() { - return this.numDVs; - } - - public void setNumDVs(long numDVs) { - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - public void unsetNumDVs() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */ - public boolean isSetNumDVs() { - return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - public void setNumDVsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case LOW_VALUE: - if (value == null) { - unsetLowValue(); - } else { - setLowValue((Decimal)value); - } - break; - - case HIGH_VALUE: - if (value == null) { - unsetHighValue(); - } else { - setHighValue((Decimal)value); - } - break; - - case NUM_NULLS: - if (value == null) { - unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - case NUM_DVS: - if (value == null) { - unsetNumDVs(); - } else { - setNumDVs((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case LOW_VALUE: - return getLowValue(); - - case HIGH_VALUE: - return getHighValue(); - - case NUM_NULLS: - 
return Long.valueOf(getNumNulls()); - - case NUM_DVS: - return Long.valueOf(getNumDVs()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case LOW_VALUE: - return isSetLowValue(); - case HIGH_VALUE: - return isSetHighValue(); - case NUM_NULLS: - return isSetNumNulls(); - case NUM_DVS: - return isSetNumDVs(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof DecimalColumnStatsData) - return this.equals((DecimalColumnStatsData)that); - return false; - } - - public boolean equals(DecimalColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_lowValue = true && this.isSetLowValue(); - boolean that_present_lowValue = true && that.isSetLowValue(); - if (this_present_lowValue || that_present_lowValue) { - if (!(this_present_lowValue && that_present_lowValue)) - return false; - if (!this.lowValue.equals(that.lowValue)) - return false; - } - - boolean this_present_highValue = true && this.isSetHighValue(); - boolean that_present_highValue = true && that.isSetHighValue(); - if (this_present_highValue || that_present_highValue) { - if (!(this_present_highValue && that_present_highValue)) - return false; - if (!this.highValue.equals(that.highValue)) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - boolean this_present_numDVs = true; - boolean that_present_numDVs = true; - if (this_present_numDVs || that_present_numDVs) { - if (!(this_present_numDVs && that_present_numDVs)) - return false; - if (this.numDVs != that.numDVs) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_lowValue = true && (isSetLowValue()); - builder.append(present_lowValue); - if (present_lowValue) - builder.append(lowValue); - - boolean present_highValue = true && (isSetHighValue()); - builder.append(present_highValue); - if (present_highValue) - builder.append(highValue); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - boolean present_numDVs = true; - builder.append(present_numDVs); - if (present_numDVs) - builder.append(numDVs); - - return builder.toHashCode(); - } - - public int compareTo(DecimalColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - DecimalColumnStatsData typedOther = (DecimalColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(typedOther.isSetLowValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetLowValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, typedOther.lowValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(typedOther.isSetHighValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHighValue()) { - 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, typedOther.highValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(typedOther.isSetNumDVs()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumDVs()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, typedOther.numDVs); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("DecimalColumnStatsData("); - boolean first = true; - - if (isSetLowValue()) { - sb.append("lowValue:"); - if (this.lowValue == null) { - sb.append("null"); - } else { - sb.append(this.lowValue); - } - first = false; - } - if (isSetHighValue()) { - if (!first) sb.append(", "); - sb.append("highValue:"); - if (this.highValue == null) { - sb.append("null"); - } else { - sb.append(this.highValue); - } - first = false; - } - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - if (!first) sb.append(", "); - sb.append("numDVs:"); - sb.append(this.numDVs); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - if (!isSetNumDVs()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (lowValue != null) { - lowValue.validate(); - } - if (highValue != null) { - highValue.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class DecimalColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public DecimalColumnStatsDataStandardScheme getScheme() { - return new DecimalColumnStatsDataStandardScheme(); - } - } - - private static class DecimalColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, DecimalColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // LOW_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.lowValue = new Decimal(); - struct.lowValue.read(iprot); - struct.setLowValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // HIGH_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.highValue = new Decimal(); - struct.highValue.read(iprot); - struct.setHighValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // NUM_DVS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, DecimalColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.lowValue != null) { - if (struct.isSetLowValue()) { - oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); - struct.lowValue.write(oprot); - oprot.writeFieldEnd(); - } - } - if (struct.highValue != null) { - if (struct.isSetHighValue()) { - oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); - struct.highValue.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); - oprot.writeI64(struct.numDVs); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class DecimalColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public DecimalColumnStatsDataTupleScheme getScheme() { - return new DecimalColumnStatsDataTupleScheme(); - } - } - - private static class DecimalColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, DecimalColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.numNulls); - oprot.writeI64(struct.numDVs); - BitSet 
optionals = new BitSet(); - if (struct.isSetLowValue()) { - optionals.set(0); - } - if (struct.isSetHighValue()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetLowValue()) { - struct.lowValue.write(oprot); - } - if (struct.isSetHighValue()) { - struct.highValue.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, DecimalColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.lowValue = new Decimal(); - struct.lowValue.read(iprot); - struct.setLowValueIsSet(true); - } - if (incoming.get(1)) { - struct.highValue = new Decimal(); - struct.highValue.read(iprot); - struct.setHighValueIsSet(true); - } - } - } +public class DecimalColumnStatsData + implements + org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "DecimalColumnStatsData"); + + private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "lowValue", org.apache.thrift.protocol.TType.STRUCT, (short) 1); + private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "highValue", org.apache.thrift.protocol.TType.STRUCT, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numDVs", org.apache.thrift.protocol.TType.I64, (short) 4); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new DecimalColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new DecimalColumnStatsDataTupleSchemeFactory()); + } + + private Decimal lowValue; // optional + private Decimal highValue; // optional + private long numNulls; // required + private long numDVs; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LOW_VALUE((short) 1, "lowValue"), HIGH_VALUE((short) 2, "highValue"), NUM_NULLS( + (short) 3, "numNulls"), NUM_DVS((short) 4, "numDVs"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // LOW_VALUE + return LOW_VALUE; + case 2: // HIGH_VALUE + return HIGH_VALUE; + case 3: // NUM_NULLS + return NUM_NULLS; + case 4: // NUM_DVS + return NUM_DVS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found.
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __NUMNULLS_ISSET_ID = 0; + private static final int __NUMDVS_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private _Fields optionals[] = { _Fields.LOW_VALUE, _Fields.HIGH_VALUE }; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.LOW_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("lowValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData( + org.apache.thrift.protocol.TType.STRUCT, + Decimal.class))); + tmpMap.put(_Fields.HIGH_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("highValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData( + org.apache.thrift.protocol.TType.STRUCT, + Decimal.class))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_DVS, + new org.apache.thrift.meta_data.FieldMetaData("numDVs", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + DecimalColumnStatsData.class, metaDataMap); + } + + public DecimalColumnStatsData() { + } + + public DecimalColumnStatsData(long numNulls, long numDVs) { + this(); + this.numNulls = numNulls; + setNumNullsIsSet(true); + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public DecimalColumnStatsData(DecimalColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetLowValue()) { + this.lowValue = new Decimal(other.lowValue); + } + if (other.isSetHighValue()) { + this.highValue = new Decimal(other.highValue); + } + this.numNulls = other.numNulls; + this.numDVs = other.numDVs; + } + + public DecimalColumnStatsData deepCopy() { + return new DecimalColumnStatsData(this); + } + + @Override + public void clear() { + this.lowValue = null; + this.highValue = null; + setNumNullsIsSet(false); + // this.numNulls = 0; + setNumDVsIsSet(false); + // this.numDVs = 0; + } + + public Decimal getLowValue() { + return this.lowValue; + } + + public void setLowValue(Decimal lowValue) { + this.lowValue = lowValue; + } + + public void unsetLowValue() { + this.lowValue = null; + } + + /** + * Returns true if field lowValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetLowValue() { + return this.lowValue != null; + } + + public void setLowValueIsSet(boolean value) { + if (!value) { + this.lowValue = null; + } + } + + public Decimal getHighValue() { + return this.highValue; + } + + public void setHighValue(Decimal highValue) { + this.highValue = highValue; + } + + public void unsetHighValue() { + this.highValue = null; + } + + /** + * Returns true if field highValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetHighValue() { + return this.highValue != null; + } + + public void setHighValueIsSet(boolean value) { + if (!value) { + this.highValue = null; + } + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public Long getNumDVs() { + if (isSetNumDVs()) + return this.numDVs; + else + return null; + } + + public void setNumDVs(long numDVs) { + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + public void unsetNumDVs() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMDVS_ISSET_ID); + } + + /** + * Returns true if field numDVs is set (has been assigned a value) and false + * otherwise + */ + public boolean isSetNumDVs() { + return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); + } + + public void setNumDVsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMDVS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOW_VALUE: + if (value == null) { + unsetLowValue(); + } else { + setLowValue((Decimal) value); + } + break; + + case HIGH_VALUE: + if (value == null) { + unsetHighValue(); + } else { + setHighValue((Decimal) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + case NUM_DVS: + if (value == null) { + unsetNumDVs(); + } else { + setNumDVs((Long) value); + } + break; + + } + } + + public Object 
getFieldValue(_Fields field) { + switch (field) { + case LOW_VALUE: + return getLowValue(); + + case HIGH_VALUE: + return getHighValue(); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + case NUM_DVS: + return Long.valueOf(getNumDVs()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOW_VALUE: + return isSetLowValue(); + case HIGH_VALUE: + return isSetHighValue(); + case NUM_NULLS: + return isSetNumNulls(); + case NUM_DVS: + return isSetNumDVs(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof DecimalColumnStatsData) + return this.equals((DecimalColumnStatsData) that); + return false; + } + + public boolean equals(DecimalColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_lowValue = true && this.isSetLowValue(); + boolean that_present_lowValue = true && that.isSetLowValue(); + if (this_present_lowValue || that_present_lowValue) { + if (!(this_present_lowValue && that_present_lowValue)) + return false; + if (!this.lowValue.equals(that.lowValue)) + return false; + } + + boolean this_present_highValue = true && this.isSetHighValue(); + boolean that_present_highValue = true && that.isSetHighValue(); + if (this_present_highValue || that_present_highValue) { + if (!(this_present_highValue && that_present_highValue)) + return false; + if (!this.highValue.equals(that.highValue)) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + boolean this_present_numDVs = true; + boolean that_present_numDVs = true; + if (this_present_numDVs || that_present_numDVs) { + if (!(this_present_numDVs && that_present_numDVs)) + return false; + if (this.numDVs != that.numDVs) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_lowValue = true && (isSetLowValue()); + builder.append(present_lowValue); + if (present_lowValue) + builder.append(lowValue); + + boolean present_highValue = true && (isSetHighValue()); + builder.append(present_highValue); + if (present_highValue) + builder.append(highValue); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + boolean present_numDVs = true; + builder.append(present_numDVs); + if (present_numDVs) + builder.append(numDVs); + + return builder.toHashCode(); + } + + public int compareTo(DecimalColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + DecimalColumnStatsData typedOther = (DecimalColumnStatsData) other; + + lastComparison = Boolean.valueOf(isSetLowValue()).compareTo( + typedOther.isSetLowValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLowValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.lowValue, typedOther.lowValue); + if (lastComparison != 0) { + return lastComparison; + } + } + 
lastComparison = Boolean.valueOf(isSetHighValue()).compareTo( + typedOther.isSetHighValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHighValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.highValue, typedOther.highValue); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo( + typedOther.isSetNumDVs()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumDVs()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numDVs, typedOther.numDVs); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("DecimalColumnStatsData("); + boolean first = true; + + if (isSetLowValue()) { + sb.append("lowValue:"); + if (this.lowValue == null) { + sb.append("null"); + } else { + sb.append(this.lowValue); + } + first = false; + } + if (isSetHighValue()) { + if (!first) + sb.append(", "); + sb.append("highValue:"); + if (this.highValue == null) { + sb.append("null"); + } else { + sb.append(this.highValue); + } + first = false; + } + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + if (!first) + sb.append(", "); + sb.append("numDVs:"); + sb.append(this.numDVs); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + if (!isSetNumDVs()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numDVs' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (lowValue != null) { + lowValue.validate(); + } + if (highValue != null) { + highValue.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class DecimalColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public DecimalColumnStatsDataStandardScheme getScheme() { + return new DecimalColumnStatsDataStandardScheme(); + } + } + + private static class DecimalColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + DecimalColumnStatsData struct) + throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LOW_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.lowValue = new Decimal(); + struct.lowValue.read(iprot); + struct.setLowValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // HIGH_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.highValue = new Decimal(); + struct.highValue.read(iprot); + struct.setHighValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 4: // NUM_DVS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + DecimalColumnStatsData struct) + throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.lowValue != null) { + if (struct.isSetLowValue()) { + oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); + struct.lowValue.write(oprot); + oprot.writeFieldEnd(); + } + } + if (struct.highValue != null) { + if (struct.isSetHighValue()) { + oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); + struct.highValue.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); + oprot.writeI64(struct.numDVs); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class DecimalColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + public DecimalColumnStatsDataTupleScheme getScheme() { + return new DecimalColumnStatsDataTupleScheme(); + } + } + + private static class DecimalColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + DecimalColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.numNulls); + 
oprot.writeI64(struct.numDVs); + BitSet optionals = new BitSet(); + if (struct.isSetLowValue()) { + optionals.set(0); + } + if (struct.isSetHighValue()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetLowValue()) { + struct.lowValue.write(oprot); + } + if (struct.isSetHighValue()) { + struct.highValue.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + DecimalColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.lowValue = new Decimal(); + struct.lowValue.read(iprot); + struct.setLowValueIsSet(true); + } + if (incoming.get(1)) { + struct.highValue = new Decimal(); + struct.highValue.read(iprot); + struct.setHighValueIsSet(true); + } + } + } } - diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java index 4203fd8..d231cb7 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java @@ -31,650 +31,746 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class DoubleColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DoubleColumnStatsData"); - - private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.DOUBLE, (short)1); - private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.DOUBLE, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new DoubleColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new DoubleColumnStatsDataTupleSchemeFactory()); - } - - private double lowValue; // optional - private double highValue; // optional - private long numNulls; // required - private long numDVs; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - LOW_VALUE((short)1, "lowValue"), - HIGH_VALUE((short)2, "highValue"), - NUM_NULLS((short)3, "numNulls"), - NUM_DVS((short)4, "numDVs"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // LOW_VALUE - return LOW_VALUE; - case 2: // HIGH_VALUE - return HIGH_VALUE; - case 3: // NUM_NULLS - return NUM_NULLS; - case 4: // NUM_DVS - return NUM_DVS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __LOWVALUE_ISSET_ID = 0; - private static final int __HIGHVALUE_ISSET_ID = 1; - private static final int __NUMNULLS_ISSET_ID = 2; - private static final int __NUMDVS_ISSET_ID = 3; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DoubleColumnStatsData.class, metaDataMap); - } - - public DoubleColumnStatsData() { - } - - public DoubleColumnStatsData( - long numNulls, - long numDVs) - { - this(); - this.numNulls = numNulls; - setNumNullsIsSet(true); - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public DoubleColumnStatsData(DoubleColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - this.lowValue = other.lowValue; - this.highValue = other.highValue; - this.numNulls = other.numNulls; - this.numDVs = other.numDVs; - } - - public DoubleColumnStatsData deepCopy() { - return new DoubleColumnStatsData(this); - } - - @Override - public void clear() { - setLowValueIsSet(false); - this.lowValue = 0.0; - setHighValueIsSet(false); - this.highValue = 0.0; - setNumNullsIsSet(false); - this.numNulls = 0; - setNumDVsIsSet(false); - this.numDVs = 0; - } - - public double getLowValue() { - return this.lowValue; - } - - public void setLowValue(double lowValue) { - this.lowValue = lowValue; - setLowValueIsSet(true); - } - - public void unsetLowValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWVALUE_ISSET_ID); - } - - /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */ - public boolean isSetLowValue() { - return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID); - } - - public void setLowValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWVALUE_ISSET_ID, value); - } - - public double getHighValue() { - return this.highValue; - } - - public void setHighValue(double highValue) { - this.highValue = highValue; - setHighValueIsSet(true); - } - - public void unsetHighValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); - } - - /** Returns true if field highValue is set (has been assigned a value) and false otherwise */ - public boolean isSetHighValue() { - return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); - } - - public void setHighValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHVALUE_ISSET_ID, value); - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public long getNumDVs() { - return this.numDVs; - } - - public void setNumDVs(long numDVs) { - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - public void unsetNumDVs() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */ - public boolean isSetNumDVs() { - return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - public void setNumDVsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case LOW_VALUE: - if (value == null) { - unsetLowValue(); - } else { - setLowValue((Double)value); - } - break; - - case HIGH_VALUE: - if (value == null) { - unsetHighValue(); - } else { - setHighValue((Double)value); - } - break; - - case NUM_NULLS: - if (value == null) { - unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - case 
NUM_DVS: - if (value == null) { - unsetNumDVs(); - } else { - setNumDVs((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case LOW_VALUE: - return Double.valueOf(getLowValue()); - - case HIGH_VALUE: - return Double.valueOf(getHighValue()); - - case NUM_NULLS: - return Long.valueOf(getNumNulls()); - - case NUM_DVS: - return Long.valueOf(getNumDVs()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case LOW_VALUE: - return isSetLowValue(); - case HIGH_VALUE: - return isSetHighValue(); - case NUM_NULLS: - return isSetNumNulls(); - case NUM_DVS: - return isSetNumDVs(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof DoubleColumnStatsData) - return this.equals((DoubleColumnStatsData)that); - return false; - } - - public boolean equals(DoubleColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_lowValue = true && this.isSetLowValue(); - boolean that_present_lowValue = true && that.isSetLowValue(); - if (this_present_lowValue || that_present_lowValue) { - if (!(this_present_lowValue && that_present_lowValue)) - return false; - if (this.lowValue != that.lowValue) - return false; - } - - boolean this_present_highValue = true && this.isSetHighValue(); - boolean that_present_highValue = true && that.isSetHighValue(); - if (this_present_highValue || that_present_highValue) { - if (!(this_present_highValue && that_present_highValue)) - return false; - if (this.highValue != that.highValue) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - boolean this_present_numDVs = true; - boolean that_present_numDVs = true; - if (this_present_numDVs || that_present_numDVs) { - if (!(this_present_numDVs && that_present_numDVs)) - return false; - if (this.numDVs != that.numDVs) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_lowValue = true && (isSetLowValue()); - builder.append(present_lowValue); - if (present_lowValue) - builder.append(lowValue); - - boolean present_highValue = true && (isSetHighValue()); - builder.append(present_highValue); - if (present_highValue) - builder.append(highValue); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - boolean present_numDVs = true; - builder.append(present_numDVs); - if (present_numDVs) - builder.append(numDVs); - - return builder.toHashCode(); - } - - public int compareTo(DoubleColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - DoubleColumnStatsData typedOther = (DoubleColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(typedOther.isSetLowValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetLowValue()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.lowValue, typedOther.lowValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(typedOther.isSetHighValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHighValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, typedOther.highValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(typedOther.isSetNumDVs()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumDVs()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, typedOther.numDVs); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("DoubleColumnStatsData("); - boolean first = true; - - if (isSetLowValue()) { - sb.append("lowValue:"); - sb.append(this.lowValue); - first = false; - } - if (isSetHighValue()) { - if (!first) sb.append(", "); - sb.append("highValue:"); - sb.append(this.highValue); - first = false; - } - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - if (!first) sb.append(", "); - sb.append("numDVs:"); - sb.append(this.numDVs); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - if (!isSetNumDVs()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class DoubleColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public DoubleColumnStatsDataStandardScheme getScheme() { - return new DoubleColumnStatsDataStandardScheme(); - } - } - - private static class DoubleColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, DoubleColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // LOW_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.lowValue = iprot.readDouble(); - struct.setLowValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // HIGH_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.highValue = iprot.readDouble(); - struct.setHighValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // NUM_DVS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, DoubleColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetLowValue()) { - oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); - oprot.writeDouble(struct.lowValue); - oprot.writeFieldEnd(); - } - if (struct.isSetHighValue()) { - oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); - oprot.writeDouble(struct.highValue); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); - oprot.writeI64(struct.numDVs); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class DoubleColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public DoubleColumnStatsDataTupleScheme getScheme() { - return new DoubleColumnStatsDataTupleScheme(); - } - } - - private static class DoubleColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, DoubleColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.numNulls); - oprot.writeI64(struct.numDVs); - BitSet optionals = new BitSet(); - if (struct.isSetLowValue()) { - optionals.set(0); - } - if (struct.isSetHighValue()) { - 
optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetLowValue()) { - oprot.writeDouble(struct.lowValue); - } - if (struct.isSetHighValue()) { - oprot.writeDouble(struct.highValue); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, DoubleColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.lowValue = iprot.readDouble(); - struct.setLowValueIsSet(true); - } - if (incoming.get(1)) { - struct.highValue = iprot.readDouble(); - struct.setHighValueIsSet(true); - } - } - } +public class DoubleColumnStatsData + implements + org.apache.thrift.TBase, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "DoubleColumnStatsData"); + + private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "lowValue", org.apache.thrift.protocol.TType.DOUBLE, (short) 1); + private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "highValue", org.apache.thrift.protocol.TType.DOUBLE, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numDVs", org.apache.thrift.protocol.TType.I64, (short) 4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new DoubleColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new DoubleColumnStatsDataTupleSchemeFactory()); + } + + private double lowValue; // optional + private double highValue; // optional + private long numNulls; // required + private long numDVs; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LOW_VALUE((short) 1, "lowValue"), HIGH_VALUE((short) 2, "highValue"), NUM_NULLS( + (short) 3, "numNulls"), NUM_DVS((short) 4, "numDVs"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // LOW_VALUE + return LOW_VALUE; + case 2: // HIGH_VALUE + return HIGH_VALUE; + case 3: // NUM_NULLS + return NUM_NULLS; + case 4: // NUM_DVS + return NUM_DVS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __LOWVALUE_ISSET_ID = 0; + private static final int __HIGHVALUE_ISSET_ID = 1; + private static final int __NUMNULLS_ISSET_ID = 2; + private static final int __NUMDVS_ISSET_ID = 3; + private byte __isset_bitfield = 0; + private _Fields optionals[] = { _Fields.LOW_VALUE, _Fields.HIGH_VALUE }; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.LOW_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("lowValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.DOUBLE))); + tmpMap.put(_Fields.HIGH_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("highValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.DOUBLE))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_DVS, + new org.apache.thrift.meta_data.FieldMetaData("numDVs", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + DoubleColumnStatsData.class, metaDataMap); + } + + public DoubleColumnStatsData() { + } + + public DoubleColumnStatsData(long numNulls, long numDVs) { + this(); + this.numNulls = numNulls; + setNumNullsIsSet(true); + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public DoubleColumnStatsData(DoubleColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + this.lowValue = other.lowValue; + this.highValue = other.highValue; + this.numNulls = other.numNulls; + this.numDVs = other.numDVs; + } + + public DoubleColumnStatsData deepCopy() { + return new DoubleColumnStatsData(this); + } + + @Override + public void clear() { + setLowValueIsSet(false); + // this.lowValue = 0.0; + setHighValueIsSet(false); + // this.highValue = 0.0; + setNumNullsIsSet(false); + // this.numNulls = 0; + setNumDVsIsSet(false); + // this.numDVs = 0; + } + + public Double getLowValue() { + if (isSetLowValue()) + return this.lowValue; + else + return null; + } + + public void setLowValue(double lowValue) { + this.lowValue = lowValue; + setLowValueIsSet(true); + } + + public void unsetLowValue() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __LOWVALUE_ISSET_ID); + } + + /** + * Returns true if field lowValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetLowValue() { + return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID); + } + + public void setLowValueIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __LOWVALUE_ISSET_ID, value); + } + + public Double getHighValue() { + if (isSetHighValue()) + return this.highValue; + else + return null; + } + + public void setHighValue(double highValue) { + this.highValue = highValue; + setHighValueIsSet(true); + } + + public void unsetHighValue() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __HIGHVALUE_ISSET_ID); + } + + /** + * Returns true if field highValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetHighValue() { + return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); + } + + public void setHighValueIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __HIGHVALUE_ISSET_ID, value); + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public Long getNumDVs() { + if (isSetNumDVs()) + return this.numDVs; + else + return null; + } + + public void setNumDVs(long numDVs) { + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + public void unsetNumDVs() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMDVS_ISSET_ID); + } + + /** + * Returns true if field numDVs is set (has been assigned a value) and false + * otherwise + */ + public boolean isSetNumDVs() { + return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); + } + + public void setNumDVsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMDVS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOW_VALUE: + if (value == null) { + unsetLowValue(); + } else { + setLowValue((Double) value); + } + break; 
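The regenerated getters above are the caller-visible heart of this change: DoubleColumnStatsData now returns boxed values, and the optional lowValue/highValue fields come back as null when they were never set instead of the primitive default 0.0 the old getters returned. A minimal illustrative sketch of that behavior (the constructor and getters are as generated above; the wrapper class itself is hypothetical):

    import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;

    public class DoubleStatsNullabilityExample {
      public static void main(String[] args) {
        // Required fields (numNulls, numDVs) are supplied via the generated constructor.
        DoubleColumnStatsData stats = new DoubleColumnStatsData(10L, 4L);

        // Optional fields were never set, so the boxed getters return null, not 0.0.
        System.out.println(stats.getLowValue());   // null
        System.out.println(stats.getHighValue());  // null

        stats.setLowValue(1.5);
        System.out.println(stats.isSetLowValue()); // true
        System.out.println(stats.getLowValue());   // 1.5

        // Required fields still come back as (boxed) values.
        System.out.println(stats.getNumNulls());   // 10
        System.out.println(stats.getNumDVs());     // 4
      }
    }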
+ + case HIGH_VALUE: + if (value == null) { + unsetHighValue(); + } else { + setHighValue((Double) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + case NUM_DVS: + if (value == null) { + unsetNumDVs(); + } else { + setNumDVs((Long) value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LOW_VALUE: + return Double.valueOf(getLowValue()); + + case HIGH_VALUE: + return Double.valueOf(getHighValue()); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + case NUM_DVS: + return Long.valueOf(getNumDVs()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOW_VALUE: + return isSetLowValue(); + case HIGH_VALUE: + return isSetHighValue(); + case NUM_NULLS: + return isSetNumNulls(); + case NUM_DVS: + return isSetNumDVs(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof DoubleColumnStatsData) + return this.equals((DoubleColumnStatsData) that); + return false; + } + + public boolean equals(DoubleColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_lowValue = true && this.isSetLowValue(); + boolean that_present_lowValue = true && that.isSetLowValue(); + if (this_present_lowValue || that_present_lowValue) { + if (!(this_present_lowValue && that_present_lowValue)) + return false; + if (this.lowValue != that.lowValue) + return false; + } + + boolean this_present_highValue = true && this.isSetHighValue(); + boolean that_present_highValue = true && that.isSetHighValue(); + if (this_present_highValue || that_present_highValue) { + if (!(this_present_highValue && that_present_highValue)) + return false; + if (this.highValue != that.highValue) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + boolean this_present_numDVs = true; + boolean that_present_numDVs = true; + if (this_present_numDVs || that_present_numDVs) { + if (!(this_present_numDVs && that_present_numDVs)) + return false; + if (this.numDVs != that.numDVs) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_lowValue = true && (isSetLowValue()); + builder.append(present_lowValue); + if (present_lowValue) + builder.append(lowValue); + + boolean present_highValue = true && (isSetHighValue()); + builder.append(present_highValue); + if (present_highValue) + builder.append(highValue); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + boolean present_numDVs = true; + builder.append(present_numDVs); + if (present_numDVs) + builder.append(numDVs); + + return builder.toHashCode(); + } + + public int compareTo(DoubleColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + DoubleColumnStatsData 
typedOther = (DoubleColumnStatsData) other; + + lastComparison = Boolean.valueOf(isSetLowValue()).compareTo( + typedOther.isSetLowValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLowValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.lowValue, typedOther.lowValue); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetHighValue()).compareTo( + typedOther.isSetHighValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHighValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.highValue, typedOther.highValue); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo( + typedOther.isSetNumDVs()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumDVs()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numDVs, typedOther.numDVs); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("DoubleColumnStatsData("); + boolean first = true; + + if (isSetLowValue()) { + sb.append("lowValue:"); + sb.append(this.lowValue); + first = false; + } + if (isSetHighValue()) { + if (!first) + sb.append(", "); + sb.append("highValue:"); + sb.append(this.highValue); + first = false; + } + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + if (!first) + sb.append(", "); + sb.append("numDVs:"); + sb.append(this.numDVs); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + if (!isSetNumDVs()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numDVs' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class DoubleColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public DoubleColumnStatsDataStandardScheme getScheme() { + return new DoubleColumnStatsDataStandardScheme(); + } + } + + private static class DoubleColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + DoubleColumnStatsData struct) + throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LOW_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { + struct.lowValue = iprot.readDouble(); + struct.setLowValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // HIGH_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { + struct.highValue = iprot.readDouble(); + struct.setHighValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 4: // NUM_DVS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + DoubleColumnStatsData struct) + throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetLowValue()) { + oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); + oprot.writeDouble(struct.lowValue); + oprot.writeFieldEnd(); + } + if (struct.isSetHighValue()) { + oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); + oprot.writeDouble(struct.highValue); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); + oprot.writeI64(struct.numDVs); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class DoubleColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + public DoubleColumnStatsDataTupleScheme getScheme() { + return new DoubleColumnStatsDataTupleScheme(); + } + } + + private static class DoubleColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + DoubleColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.numNulls); + oprot.writeI64(struct.numDVs); + BitSet optionals = new BitSet(); + if (struct.isSetLowValue()) { + optionals.set(0); + } + if 
(struct.isSetHighValue()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetLowValue()) { + oprot.writeDouble(struct.lowValue); + } + if (struct.isSetHighValue()) { + oprot.writeDouble(struct.highValue); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + DoubleColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.lowValue = iprot.readDouble(); + struct.setLowValueIsSet(true); + } + if (incoming.get(1)) { + struct.highValue = iprot.readDouble(); + struct.setHighValueIsSet(true); + } + } + } } - diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java index d817d46..1c43398 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java @@ -31,650 +31,742 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class LongColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LongColumnStatsData"); - - private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.I64, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new LongColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new LongColumnStatsDataTupleSchemeFactory()); - } - - private long lowValue; // optional - private long highValue; // optional - private long numNulls; // required - private long numDVs; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - LOW_VALUE((short)1, "lowValue"), - HIGH_VALUE((short)2, "highValue"), - NUM_NULLS((short)3, "numNulls"), - NUM_DVS((short)4, "numDVs"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
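The tuple-scheme write/read shown just above still serializes the two required counters first and then a two-bit set flagging which optional values follow, so an unset lowValue or highValue stays unset across a round trip and now surfaces as null through the boxed getters. A rough, hedged sketch of such a round trip (the buffer size and wrapper class name are illustrative assumptions):

    import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class DoubleStatsTupleRoundTrip {
      public static void main(String[] args) throws Exception {
        DoubleColumnStatsData original = new DoubleColumnStatsData(10L, 4L);
        original.setHighValue(99.9);                // lowValue deliberately left unset

        TMemoryBuffer buffer = new TMemoryBuffer(128);
        original.write(new TTupleProtocol(buffer)); // dispatches to the TupleScheme above

        DoubleColumnStatsData copy = new DoubleColumnStatsData();
        copy.read(new TTupleProtocol(buffer));

        System.out.println(copy.getLowValue());   // null -- never set, not 0.0
        System.out.println(copy.getHighValue());  // 99.9
        System.out.println(copy.getNumNulls());   // 10
      }
    }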
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // LOW_VALUE - return LOW_VALUE; - case 2: // HIGH_VALUE - return HIGH_VALUE; - case 3: // NUM_NULLS - return NUM_NULLS; - case 4: // NUM_DVS - return NUM_DVS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __LOWVALUE_ISSET_ID = 0; - private static final int __HIGHVALUE_ISSET_ID = 1; - private static final int __NUMNULLS_ISSET_ID = 2; - private static final int __NUMDVS_ISSET_ID = 3; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LongColumnStatsData.class, metaDataMap); - } - - public LongColumnStatsData() { - } - - public LongColumnStatsData( - long numNulls, - long numDVs) - { - this(); - this.numNulls = numNulls; - setNumNullsIsSet(true); - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public LongColumnStatsData(LongColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - this.lowValue = other.lowValue; - this.highValue = other.highValue; - this.numNulls = other.numNulls; - this.numDVs = other.numDVs; - } - - public LongColumnStatsData deepCopy() { - return new LongColumnStatsData(this); - } - - @Override - public void clear() { - setLowValueIsSet(false); - this.lowValue = 0; - setHighValueIsSet(false); - this.highValue = 0; - setNumNullsIsSet(false); - this.numNulls = 0; - setNumDVsIsSet(false); - this.numDVs = 0; - } - - public long getLowValue() { - return this.lowValue; - } - - public void setLowValue(long lowValue) { - this.lowValue = lowValue; - setLowValueIsSet(true); - } - - public void unsetLowValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWVALUE_ISSET_ID); - } - - /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */ - public boolean isSetLowValue() { - return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID); - } - - public void setLowValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWVALUE_ISSET_ID, value); - } - - public long getHighValue() { - return this.highValue; - } - - public void setHighValue(long highValue) { - this.highValue = highValue; - setHighValueIsSet(true); - } - - public void unsetHighValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); - } - - /** Returns true if field highValue is set (has been assigned a value) and false otherwise */ - public boolean isSetHighValue() { - return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); - } - - public void setHighValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHVALUE_ISSET_ID, value); - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public long getNumDVs() { - return this.numDVs; - } - - public void setNumDVs(long numDVs) { - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - public void unsetNumDVs() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */ - public boolean isSetNumDVs() { - return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - public void setNumDVsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case LOW_VALUE: - if (value == null) { - unsetLowValue(); - } else { - setLowValue((Long)value); - } - break; - - case HIGH_VALUE: - if (value == null) { - unsetHighValue(); - } else { - setHighValue((Long)value); - } - break; - - case NUM_NULLS: - if (value == null) { - unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - case NUM_DVS: - if (value == null) 
{ - unsetNumDVs(); - } else { - setNumDVs((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case LOW_VALUE: - return Long.valueOf(getLowValue()); - - case HIGH_VALUE: - return Long.valueOf(getHighValue()); - - case NUM_NULLS: - return Long.valueOf(getNumNulls()); - - case NUM_DVS: - return Long.valueOf(getNumDVs()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case LOW_VALUE: - return isSetLowValue(); - case HIGH_VALUE: - return isSetHighValue(); - case NUM_NULLS: - return isSetNumNulls(); - case NUM_DVS: - return isSetNumDVs(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof LongColumnStatsData) - return this.equals((LongColumnStatsData)that); - return false; - } - - public boolean equals(LongColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_lowValue = true && this.isSetLowValue(); - boolean that_present_lowValue = true && that.isSetLowValue(); - if (this_present_lowValue || that_present_lowValue) { - if (!(this_present_lowValue && that_present_lowValue)) - return false; - if (this.lowValue != that.lowValue) - return false; - } - - boolean this_present_highValue = true && this.isSetHighValue(); - boolean that_present_highValue = true && that.isSetHighValue(); - if (this_present_highValue || that_present_highValue) { - if (!(this_present_highValue && that_present_highValue)) - return false; - if (this.highValue != that.highValue) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - boolean this_present_numDVs = true; - boolean that_present_numDVs = true; - if (this_present_numDVs || that_present_numDVs) { - if (!(this_present_numDVs && that_present_numDVs)) - return false; - if (this.numDVs != that.numDVs) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_lowValue = true && (isSetLowValue()); - builder.append(present_lowValue); - if (present_lowValue) - builder.append(lowValue); - - boolean present_highValue = true && (isSetHighValue()); - builder.append(present_highValue); - if (present_highValue) - builder.append(highValue); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - boolean present_numDVs = true; - builder.append(present_numDVs); - if (present_numDVs) - builder.append(numDVs); - - return builder.toHashCode(); - } - - public int compareTo(LongColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - LongColumnStatsData typedOther = (LongColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(typedOther.isSetLowValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetLowValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, 
typedOther.lowValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(typedOther.isSetHighValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHighValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, typedOther.highValue); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(typedOther.isSetNumDVs()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumDVs()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, typedOther.numDVs); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("LongColumnStatsData("); - boolean first = true; - - if (isSetLowValue()) { - sb.append("lowValue:"); - sb.append(this.lowValue); - first = false; - } - if (isSetHighValue()) { - if (!first) sb.append(", "); - sb.append("highValue:"); - sb.append(this.highValue); - first = false; - } - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - if (!first) sb.append(", "); - sb.append("numDVs:"); - sb.append(this.numDVs); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - if (!isSetNumDVs()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class LongColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public LongColumnStatsDataStandardScheme getScheme() { - return new LongColumnStatsDataStandardScheme(); - } - } - - private static class LongColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, LongColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // LOW_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.lowValue = iprot.readI64(); - struct.setLowValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // HIGH_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.highValue = iprot.readI64(); - struct.setHighValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // NUM_DVS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, LongColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetLowValue()) { - oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); - oprot.writeI64(struct.lowValue); - oprot.writeFieldEnd(); - } - if (struct.isSetHighValue()) { - oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); - oprot.writeI64(struct.highValue); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); - oprot.writeI64(struct.numDVs); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class LongColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public LongColumnStatsDataTupleScheme getScheme() { - return new LongColumnStatsDataTupleScheme(); - } - } - - private static class LongColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, LongColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.numNulls); - oprot.writeI64(struct.numDVs); - BitSet optionals = new BitSet(); - if (struct.isSetLowValue()) { - optionals.set(0); - } - if (struct.isSetHighValue()) { - optionals.set(1); - } - 
oprot.writeBitSet(optionals, 2); - if (struct.isSetLowValue()) { - oprot.writeI64(struct.lowValue); - } - if (struct.isSetHighValue()) { - oprot.writeI64(struct.highValue); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, LongColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.lowValue = iprot.readI64(); - struct.setLowValueIsSet(true); - } - if (incoming.get(1)) { - struct.highValue = iprot.readI64(); - struct.setHighValueIsSet(true); - } - } - } +public class LongColumnStatsData + implements + org.apache.thrift.TBase, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "LongColumnStatsData"); + + private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "lowValue", org.apache.thrift.protocol.TType.I64, (short) 1); + private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField( + "highValue", org.apache.thrift.protocol.TType.I64, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numDVs", org.apache.thrift.protocol.TType.I64, (short) 4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new LongColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new LongColumnStatsDataTupleSchemeFactory()); + } + + private long lowValue; // optional + private long highValue; // optional + private long numNulls; // required + private long numDVs; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LOW_VALUE((short) 1, "lowValue"), HIGH_VALUE((short) 2, "highValue"), NUM_NULLS( + (short) 3, "numNulls"), NUM_DVS((short) 4, "numDVs"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // LOW_VALUE + return LOW_VALUE; + case 2: // HIGH_VALUE + return HIGH_VALUE; + case 3: // NUM_NULLS + return NUM_NULLS; + case 4: // NUM_DVS + return NUM_DVS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __LOWVALUE_ISSET_ID = 0; + private static final int __HIGHVALUE_ISSET_ID = 1; + private static final int __NUMNULLS_ISSET_ID = 2; + private static final int __NUMDVS_ISSET_ID = 3; + private byte __isset_bitfield = 0; + private _Fields optionals[] = { _Fields.LOW_VALUE, _Fields.HIGH_VALUE }; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.LOW_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("lowValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.HIGH_VALUE, + new org.apache.thrift.meta_data.FieldMetaData("highValue", + org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_DVS, + new org.apache.thrift.meta_data.FieldMetaData("numDVs", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + LongColumnStatsData.class, metaDataMap); + } + + public LongColumnStatsData() { + } + + public LongColumnStatsData(long numNulls, long numDVs) { + this(); + this.numNulls = numNulls; + setNumNullsIsSet(true); + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public LongColumnStatsData(LongColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + this.lowValue = other.lowValue; + this.highValue = other.highValue; + this.numNulls = other.numNulls; + this.numDVs = other.numDVs; + } + + public LongColumnStatsData deepCopy() { + return new LongColumnStatsData(this); + } + + @Override + public void clear() { + setLowValueIsSet(false); + // this.lowValue = 0; + setHighValueIsSet(false); + // this.highValue = 0; + setNumNullsIsSet(false); + // this.numNulls = 0; + setNumDVsIsSet(false); + // this.numDVs = 0; + } + + public Long getLowValue() { + if (isSetLowValue()) + return this.lowValue; + else + return null; + } + + public void setLowValue(long lowValue) { + this.lowValue = lowValue; + setLowValueIsSet(true); + } + + public void unsetLowValue() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __LOWVALUE_ISSET_ID); + } + + /** + * Returns true if field lowValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetLowValue() { + return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID); + } + + public void setLowValueIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __LOWVALUE_ISSET_ID, value); + } + + public Long getHighValue() { + if (isSetHighValue()) + return this.highValue; + else + return null; + } + + public void setHighValue(long highValue) { + this.highValue = highValue; + setHighValueIsSet(true); + } + + public void unsetHighValue() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __HIGHVALUE_ISSET_ID); + } + + /** + * Returns true if field highValue is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetHighValue() { + return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID); + } + + public void setHighValueIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __HIGHVALUE_ISSET_ID, value); + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public Long getNumDVs() { + if (isSetNumDVs()) + return this.numDVs; + else + return null; + } + + public void setNumDVs(long numDVs) { + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + public void unsetNumDVs() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMDVS_ISSET_ID); + } + + /** + * Returns true if field numDVs is set (has been assigned a value) and false + * otherwise + */ + public boolean isSetNumDVs() { + return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); + } + + public void setNumDVsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMDVS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOW_VALUE: + if (value == null) { + unsetLowValue(); + } else { + setLowValue((Long) value); + } + break; + + case HIGH_VALUE: + 
if (value == null) { + unsetHighValue(); + } else { + setHighValue((Long) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + case NUM_DVS: + if (value == null) { + unsetNumDVs(); + } else { + setNumDVs((Long) value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LOW_VALUE: + return Long.valueOf(getLowValue()); + + case HIGH_VALUE: + return Long.valueOf(getHighValue()); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + case NUM_DVS: + return Long.valueOf(getNumDVs()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOW_VALUE: + return isSetLowValue(); + case HIGH_VALUE: + return isSetHighValue(); + case NUM_NULLS: + return isSetNumNulls(); + case NUM_DVS: + return isSetNumDVs(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof LongColumnStatsData) + return this.equals((LongColumnStatsData) that); + return false; + } + + public boolean equals(LongColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_lowValue = true && this.isSetLowValue(); + boolean that_present_lowValue = true && that.isSetLowValue(); + if (this_present_lowValue || that_present_lowValue) { + if (!(this_present_lowValue && that_present_lowValue)) + return false; + if (this.lowValue != that.lowValue) + return false; + } + + boolean this_present_highValue = true && this.isSetHighValue(); + boolean that_present_highValue = true && that.isSetHighValue(); + if (this_present_highValue || that_present_highValue) { + if (!(this_present_highValue && that_present_highValue)) + return false; + if (this.highValue != that.highValue) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + boolean this_present_numDVs = true; + boolean that_present_numDVs = true; + if (this_present_numDVs || that_present_numDVs) { + if (!(this_present_numDVs && that_present_numDVs)) + return false; + if (this.numDVs != that.numDVs) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_lowValue = true && (isSetLowValue()); + builder.append(present_lowValue); + if (present_lowValue) + builder.append(lowValue); + + boolean present_highValue = true && (isSetHighValue()); + builder.append(present_highValue); + if (present_highValue) + builder.append(highValue); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + boolean present_numDVs = true; + builder.append(present_numDVs); + if (present_numDVs) + builder.append(numDVs); + + return builder.toHashCode(); + } + + public int compareTo(LongColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + LongColumnStatsData typedOther = (LongColumnStatsData) other; + 
+ lastComparison = Boolean.valueOf(isSetLowValue()).compareTo( + typedOther.isSetLowValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLowValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.lowValue, typedOther.lowValue); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetHighValue()).compareTo( + typedOther.isSetHighValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHighValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.highValue, typedOther.highValue); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo( + typedOther.isSetNumDVs()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumDVs()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numDVs, typedOther.numDVs); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("LongColumnStatsData("); + boolean first = true; + + if (isSetLowValue()) { + sb.append("lowValue:"); + sb.append(this.lowValue); + first = false; + } + if (isSetHighValue()) { + if (!first) + sb.append(", "); + sb.append("highValue:"); + sb.append(this.highValue); + first = false; + } + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + if (!first) + sb.append(", "); + sb.append("numDVs:"); + sb.append(this.numDVs); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + if (!isSetNumDVs()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numDVs' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class LongColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public LongColumnStatsDataStandardScheme getScheme() { + return new LongColumnStatsDataStandardScheme(); + } + } + + private static class LongColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + LongColumnStatsData struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LOW_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.lowValue = iprot.readI64(); + struct.setLowValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // HIGH_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.highValue = iprot.readI64(); + struct.setHighValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 4: // NUM_DVS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + LongColumnStatsData struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetLowValue()) { + oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC); + oprot.writeI64(struct.lowValue); + oprot.writeFieldEnd(); + } + if (struct.isSetHighValue()) { + oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC); + oprot.writeI64(struct.highValue); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); + oprot.writeI64(struct.numDVs); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class LongColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + public LongColumnStatsDataTupleScheme getScheme() { + return new LongColumnStatsDataTupleScheme(); + } + } + + private static class LongColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + LongColumnStatsData struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.numNulls); + oprot.writeI64(struct.numDVs); + BitSet optionals = new BitSet(); + if (struct.isSetLowValue()) { + optionals.set(0); + } + if (struct.isSetHighValue()) { + optionals.set(1); + } + 
oprot.writeBitSet(optionals, 2); + if (struct.isSetLowValue()) { + oprot.writeI64(struct.lowValue); + } + if (struct.isSetHighValue()) { + oprot.writeI64(struct.highValue); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + LongColumnStatsData struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.lowValue = iprot.readI64(); + struct.setLowValueIsSet(true); + } + if (incoming.get(1)) { + struct.highValue = iprot.readI64(); + struct.setHighValueIsSet(true); + } + } + } } - diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java index db3274a..c43c4b0 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java @@ -10,12 +10,12 @@ import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; - import org.apache.thrift.scheme.TupleScheme; import org.apache.thrift.protocol.TTupleProtocol; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.EncodingUtils; import org.apache.thrift.TException; + import java.util.List; import java.util.ArrayList; import java.util.Map; @@ -28,641 +28,739 @@ import java.util.BitSet; import java.nio.ByteBuffer; import java.util.Arrays; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class StringColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StringColumnStatsData"); - - private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField AVG_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("avgColLen", org.apache.thrift.protocol.TType.DOUBLE, (short)2); - private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new StringColumnStatsDataStandardSchemeFactory()); - schemes.put(TupleScheme.class, new StringColumnStatsDataTupleSchemeFactory()); - } - - private long maxColLen; // required - private double avgColLen; // required - private long numNulls; // required - private long numDVs; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - MAX_COL_LEN((short)1, "maxColLen"), - AVG_COL_LEN((short)2, "avgColLen"), - NUM_NULLS((short)3, "numNulls"), - NUM_DVS((short)4, "numDVs"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // MAX_COL_LEN - return MAX_COL_LEN; - case 2: // AVG_COL_LEN - return AVG_COL_LEN; - case 3: // NUM_NULLS - return NUM_NULLS; - case 4: // NUM_DVS - return NUM_DVS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __MAXCOLLEN_ISSET_ID = 0; - private static final int __AVGCOLLEN_ISSET_ID = 1; - private static final int __NUMNULLS_ISSET_ID = 2; - private static final int __NUMDVS_ISSET_ID = 3; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.MAX_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("maxColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.AVG_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("avgColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StringColumnStatsData.class, metaDataMap); - } - - public StringColumnStatsData() { - } - - public StringColumnStatsData( - long maxColLen, - double avgColLen, - long numNulls, - long numDVs) - { - this(); - this.maxColLen = maxColLen; - setMaxColLenIsSet(true); - this.avgColLen = avgColLen; - setAvgColLenIsSet(true); - this.numNulls = numNulls; - setNumNullsIsSet(true); - this.numDVs = numDVs; - 
setNumDVsIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public StringColumnStatsData(StringColumnStatsData other) { - __isset_bitfield = other.__isset_bitfield; - this.maxColLen = other.maxColLen; - this.avgColLen = other.avgColLen; - this.numNulls = other.numNulls; - this.numDVs = other.numDVs; - } - - public StringColumnStatsData deepCopy() { - return new StringColumnStatsData(this); - } - - @Override - public void clear() { - setMaxColLenIsSet(false); - this.maxColLen = 0; - setAvgColLenIsSet(false); - this.avgColLen = 0.0; - setNumNullsIsSet(false); - this.numNulls = 0; - setNumDVsIsSet(false); - this.numDVs = 0; - } - - public long getMaxColLen() { - return this.maxColLen; - } - - public void setMaxColLen(long maxColLen) { - this.maxColLen = maxColLen; - setMaxColLenIsSet(true); - } - - public void unsetMaxColLen() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); - } - - /** Returns true if field maxColLen is set (has been assigned a value) and false otherwise */ - public boolean isSetMaxColLen() { - return EncodingUtils.testBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); - } - - public void setMaxColLenIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID, value); - } - - public double getAvgColLen() { - return this.avgColLen; - } - - public void setAvgColLen(double avgColLen) { - this.avgColLen = avgColLen; - setAvgColLenIsSet(true); - } - - public void unsetAvgColLen() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); - } - - /** Returns true if field avgColLen is set (has been assigned a value) and false otherwise */ - public boolean isSetAvgColLen() { - return EncodingUtils.testBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); - } - - public void setAvgColLenIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID, value); - } - - public long getNumNulls() { - return this.numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - setNumNullsIsSet(true); - } - - public void unsetNumNulls() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNumNulls() { - return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); - } - - public void setNumNullsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value); - } - - public long getNumDVs() { - return this.numDVs; - } - - public void setNumDVs(long numDVs) { - this.numDVs = numDVs; - setNumDVsIsSet(true); - } - - public void unsetNumDVs() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */ - public boolean isSetNumDVs() { - return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); - } - - public void setNumDVsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case MAX_COL_LEN: - if (value == null) { - unsetMaxColLen(); - } else { - setMaxColLen((Long)value); - } - break; - - case AVG_COL_LEN: - if (value == null) { - unsetAvgColLen(); - } else { - setAvgColLen((Double)value); - } - break; - - case NUM_NULLS: - if (value == null) { - 
unsetNumNulls(); - } else { - setNumNulls((Long)value); - } - break; - - case NUM_DVS: - if (value == null) { - unsetNumDVs(); - } else { - setNumDVs((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case MAX_COL_LEN: - return Long.valueOf(getMaxColLen()); - - case AVG_COL_LEN: - return Double.valueOf(getAvgColLen()); - - case NUM_NULLS: - return Long.valueOf(getNumNulls()); - - case NUM_DVS: - return Long.valueOf(getNumDVs()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case MAX_COL_LEN: - return isSetMaxColLen(); - case AVG_COL_LEN: - return isSetAvgColLen(); - case NUM_NULLS: - return isSetNumNulls(); - case NUM_DVS: - return isSetNumDVs(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof StringColumnStatsData) - return this.equals((StringColumnStatsData)that); - return false; - } - - public boolean equals(StringColumnStatsData that) { - if (that == null) - return false; - - boolean this_present_maxColLen = true; - boolean that_present_maxColLen = true; - if (this_present_maxColLen || that_present_maxColLen) { - if (!(this_present_maxColLen && that_present_maxColLen)) - return false; - if (this.maxColLen != that.maxColLen) - return false; - } - - boolean this_present_avgColLen = true; - boolean that_present_avgColLen = true; - if (this_present_avgColLen || that_present_avgColLen) { - if (!(this_present_avgColLen && that_present_avgColLen)) - return false; - if (this.avgColLen != that.avgColLen) - return false; - } - - boolean this_present_numNulls = true; - boolean that_present_numNulls = true; - if (this_present_numNulls || that_present_numNulls) { - if (!(this_present_numNulls && that_present_numNulls)) - return false; - if (this.numNulls != that.numNulls) - return false; - } - - boolean this_present_numDVs = true; - boolean that_present_numDVs = true; - if (this_present_numDVs || that_present_numDVs) { - if (!(this_present_numDVs && that_present_numDVs)) - return false; - if (this.numDVs != that.numDVs) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_maxColLen = true; - builder.append(present_maxColLen); - if (present_maxColLen) - builder.append(maxColLen); - - boolean present_avgColLen = true; - builder.append(present_avgColLen); - if (present_avgColLen) - builder.append(avgColLen); - - boolean present_numNulls = true; - builder.append(present_numNulls); - if (present_numNulls) - builder.append(numNulls); - - boolean present_numDVs = true; - builder.append(present_numDVs); - if (present_numDVs) - builder.append(numDVs); - - return builder.toHashCode(); - } - - public int compareTo(StringColumnStatsData other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - StringColumnStatsData typedOther = (StringColumnStatsData)other; - - lastComparison = Boolean.valueOf(isSetMaxColLen()).compareTo(typedOther.isSetMaxColLen()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMaxColLen()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxColLen, 
typedOther.maxColLen); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetAvgColLen()).compareTo(typedOther.isSetAvgColLen()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetAvgColLen()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.avgColLen, typedOther.avgColLen); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(typedOther.isSetNumNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, typedOther.numNulls); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(typedOther.isSetNumDVs()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNumDVs()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, typedOther.numDVs); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("StringColumnStatsData("); - boolean first = true; - - sb.append("maxColLen:"); - sb.append(this.maxColLen); - first = false; - if (!first) sb.append(", "); - sb.append("avgColLen:"); - sb.append(this.avgColLen); - first = false; - if (!first) sb.append(", "); - sb.append("numNulls:"); - sb.append(this.numNulls); - first = false; - if (!first) sb.append(", "); - sb.append("numDVs:"); - sb.append(this.numDVs); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetMaxColLen()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxColLen' is unset! Struct:" + toString()); - } - - if (!isSetAvgColLen()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'avgColLen' is unset! Struct:" + toString()); - } - - if (!isSetNumNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString()); - } - - if (!isSetNumDVs()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class StringColumnStatsDataStandardSchemeFactory implements SchemeFactory { - public StringColumnStatsDataStandardScheme getScheme() { - return new StringColumnStatsDataStandardScheme(); - } - } - - private static class StringColumnStatsDataStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, StringColumnStatsData struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // MAX_COL_LEN - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.maxColLen = iprot.readI64(); - struct.setMaxColLenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // AVG_COL_LEN - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.avgColLen = iprot.readDouble(); - struct.setAvgColLenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // NUM_NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // NUM_DVS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, StringColumnStatsData struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(MAX_COL_LEN_FIELD_DESC); - oprot.writeI64(struct.maxColLen); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(AVG_COL_LEN_FIELD_DESC); - oprot.writeDouble(struct.avgColLen); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); - oprot.writeI64(struct.numNulls); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); - oprot.writeI64(struct.numDVs); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class StringColumnStatsDataTupleSchemeFactory implements SchemeFactory { - public StringColumnStatsDataTupleScheme getScheme() { - return new StringColumnStatsDataTupleScheme(); - } - } - - private static class StringColumnStatsDataTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, StringColumnStatsData struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.maxColLen); - oprot.writeDouble(struct.avgColLen); - oprot.writeI64(struct.numNulls); - oprot.writeI64(struct.numDVs); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, StringColumnStatsData struct) throws 
org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.maxColLen = iprot.readI64(); - struct.setMaxColLenIsSet(true); - struct.avgColLen = iprot.readDouble(); - struct.setAvgColLenIsSet(true); - struct.numNulls = iprot.readI64(); - struct.setNumNullsIsSet(true); - struct.numDVs = iprot.readI64(); - struct.setNumDVsIsSet(true); - } - } +public class StringColumnStatsData + implements + org.apache.thrift.TBase, + java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct( + "StringColumnStatsData"); + + private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField( + "maxColLen", org.apache.thrift.protocol.TType.I64, (short) 1); + private static final org.apache.thrift.protocol.TField AVG_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField( + "avgColLen", org.apache.thrift.protocol.TType.DOUBLE, (short) 2); + private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numNulls", org.apache.thrift.protocol.TType.I64, (short) 3); + private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField( + "numDVs", org.apache.thrift.protocol.TType.I64, (short) 4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, + new StringColumnStatsDataStandardSchemeFactory()); + schemes.put(TupleScheme.class, + new StringColumnStatsDataTupleSchemeFactory()); + } + + private long maxColLen; // required + private double avgColLen; // required + private long numNulls; // required + private long numDVs; // required + + /** + * The set of fields this struct contains, along with convenience methods + * for finding and manipulating them. + */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MAX_COL_LEN((short) 1, "maxColLen"), AVG_COL_LEN((short) 2, "avgColLen"), NUM_NULLS( + (short) 3, "numNulls"), NUM_DVS((short) 4, "numDVs"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not + * found. + */ + public static _Fields findByThriftId(int fieldId) { + switch (fieldId) { + case 1: // MAX_COL_LEN + return MAX_COL_LEN; + case 2: // AVG_COL_LEN + return AVG_COL_LEN; + case 3: // NUM_NULLS + return NUM_NULLS; + case 4: // NUM_DVS + return NUM_DVS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) + throw new IllegalArgumentException("Field " + fieldId + + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not + * found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __MAXCOLLEN_ISSET_ID = 0; + private static final int __AVGCOLLEN_ISSET_ID = 1; + private static final int __NUMNULLS_ISSET_ID = 2; + private static final int __NUMDVS_ISSET_ID = 3; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>( + _Fields.class); + tmpMap.put(_Fields.MAX_COL_LEN, + new org.apache.thrift.meta_data.FieldMetaData("maxColLen", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.AVG_COL_LEN, + new org.apache.thrift.meta_data.FieldMetaData("avgColLen", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.DOUBLE))); + tmpMap.put(_Fields.NUM_NULLS, + new org.apache.thrift.meta_data.FieldMetaData("numNulls", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.NUM_DVS, + new org.apache.thrift.meta_data.FieldMetaData("numDVs", + org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData( + org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap( + StringColumnStatsData.class, metaDataMap); + } + + public StringColumnStatsData() { + } + + public StringColumnStatsData(long maxColLen, double avgColLen, + long numNulls, long numDVs) { + this(); + this.maxColLen = maxColLen; + setMaxColLenIsSet(true); + this.avgColLen = avgColLen; + setAvgColLenIsSet(true); + this.numNulls = numNulls; + setNumNullsIsSet(true); + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public StringColumnStatsData(StringColumnStatsData other) { + __isset_bitfield = other.__isset_bitfield; + this.maxColLen = other.maxColLen; + this.avgColLen = other.avgColLen; + this.numNulls = other.numNulls; + this.numDVs = other.numDVs; + } + + public StringColumnStatsData deepCopy() { + return new StringColumnStatsData(this); + } + + @Override + public void clear() { + setMaxColLenIsSet(false); + // this.maxColLen = 0; + setAvgColLenIsSet(false); + // this.avgColLen = 0.0; + setNumNullsIsSet(false); + // this.numNulls = 0; + setNumDVsIsSet(false); + // this.numDVs = 0; + } + + public Long getMaxColLen() { + if (isSetMaxColLen()) + return this.maxColLen; + else + return null; + } + + public void setMaxColLen(long maxColLen) { + this.maxColLen = maxColLen; + setMaxColLenIsSet(true); + } + + public void unsetMaxColLen() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __MAXCOLLEN_ISSET_ID); + } + + /** + * Returns true if field maxColLen is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetMaxColLen() { + return EncodingUtils.testBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID); + } + + public void setMaxColLenIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __MAXCOLLEN_ISSET_ID, value); + } + + public Double getAvgColLen() { + if (isSetAvgColLen()) + return this.avgColLen; + else + return null; + } + + public void setAvgColLen(double avgColLen) { + this.avgColLen = avgColLen; + setAvgColLenIsSet(true); + } + + public void unsetAvgColLen() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __AVGCOLLEN_ISSET_ID); + } + + /** + * Returns true if field avgColLen is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetAvgColLen() { + return EncodingUtils.testBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID); + } + + public void setAvgColLenIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __AVGCOLLEN_ISSET_ID, value); + } + + public Long getNumNulls() { + if (isSetNumNulls()) + return this.numNulls; + else + return null; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + setNumNullsIsSet(true); + } + + public void unsetNumNulls() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMNULLS_ISSET_ID); + } + + /** + * Returns true if field numNulls is set (has been assigned a value) and + * false otherwise + */ + public boolean isSetNumNulls() { + return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID); + } + + public void setNumNullsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMNULLS_ISSET_ID, value); + } + + public Long getNumDVs() { + if (isSetNumDVs()) + return this.numDVs; + else + return null; + } + + public void setNumDVs(long numDVs) { + this.numDVs = numDVs; + setNumDVsIsSet(true); + } + + public void unsetNumDVs() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, + __NUMDVS_ISSET_ID); + } + + /** + * Returns true if field numDVs is set (has been assigned a value) and false + * otherwise + */ + public boolean isSetNumDVs() { + return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID); + } + + public void setNumDVsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, + __NUMDVS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MAX_COL_LEN: + if (value == null) { + unsetMaxColLen(); + } else { + setMaxColLen((Long) 
value); + } + break; + + case AVG_COL_LEN: + if (value == null) { + unsetAvgColLen(); + } else { + setAvgColLen((Double) value); + } + break; + + case NUM_NULLS: + if (value == null) { + unsetNumNulls(); + } else { + setNumNulls((Long) value); + } + break; + + case NUM_DVS: + if (value == null) { + unsetNumDVs(); + } else { + setNumDVs((Long) value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MAX_COL_LEN: + return Long.valueOf(getMaxColLen()); + + case AVG_COL_LEN: + return Double.valueOf(getAvgColLen()); + + case NUM_NULLS: + return Long.valueOf(getNumNulls()); + + case NUM_DVS: + return Long.valueOf(getNumDVs()); + + } + throw new IllegalStateException(); + } + + /** + * Returns true if field corresponding to fieldID is set (has been assigned + * a value) and false otherwise + */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MAX_COL_LEN: + return isSetMaxColLen(); + case AVG_COL_LEN: + return isSetAvgColLen(); + case NUM_NULLS: + return isSetNumNulls(); + case NUM_DVS: + return isSetNumDVs(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof StringColumnStatsData) + return this.equals((StringColumnStatsData) that); + return false; + } + + public boolean equals(StringColumnStatsData that) { + if (that == null) + return false; + + boolean this_present_maxColLen = true; + boolean that_present_maxColLen = true; + if (this_present_maxColLen || that_present_maxColLen) { + if (!(this_present_maxColLen && that_present_maxColLen)) + return false; + if (this.maxColLen != that.maxColLen) + return false; + } + + boolean this_present_avgColLen = true; + boolean that_present_avgColLen = true; + if (this_present_avgColLen || that_present_avgColLen) { + if (!(this_present_avgColLen && that_present_avgColLen)) + return false; + if (this.avgColLen != that.avgColLen) + return false; + } + + boolean this_present_numNulls = true; + boolean that_present_numNulls = true; + if (this_present_numNulls || that_present_numNulls) { + if (!(this_present_numNulls && that_present_numNulls)) + return false; + if (this.numNulls != that.numNulls) + return false; + } + + boolean this_present_numDVs = true; + boolean that_present_numDVs = true; + if (this_present_numDVs || that_present_numDVs) { + if (!(this_present_numDVs && that_present_numDVs)) + return false; + if (this.numDVs != that.numDVs) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_maxColLen = true; + builder.append(present_maxColLen); + if (present_maxColLen) + builder.append(maxColLen); + + boolean present_avgColLen = true; + builder.append(present_avgColLen); + if (present_avgColLen) + builder.append(avgColLen); + + boolean present_numNulls = true; + builder.append(present_numNulls); + if (present_numNulls) + builder.append(numNulls); + + boolean present_numDVs = true; + builder.append(present_numDVs); + if (present_numDVs) + builder.append(numDVs); + + return builder.toHashCode(); + } + + public int compareTo(StringColumnStatsData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + StringColumnStatsData typedOther = (StringColumnStatsData) other; + + lastComparison = 
Boolean.valueOf(isSetMaxColLen()).compareTo( + typedOther.isSetMaxColLen()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxColLen()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.maxColLen, typedOther.maxColLen); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAvgColLen()).compareTo( + typedOther.isSetAvgColLen()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAvgColLen()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.avgColLen, typedOther.avgColLen); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo( + typedOther.isSetNumNulls()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumNulls()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numNulls, typedOther.numNulls); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo( + typedOther.isSetNumDVs()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumDVs()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo( + this.numDVs, typedOther.numDVs); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) + throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) + throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("StringColumnStatsData("); + boolean first = true; + + sb.append("maxColLen:"); + sb.append(this.maxColLen); + first = false; + if (!first) + sb.append(", "); + sb.append("avgColLen:"); + sb.append(this.avgColLen); + first = false; + if (!first) + sb.append(", "); + sb.append("numNulls:"); + sb.append(this.numNulls); + first = false; + if (!first) + sb.append(", "); + sb.append("numDVs:"); + sb.append(this.numDVs); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetMaxColLen()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'maxColLen' is unset! Struct:" + toString()); + } + + if (!isSetAvgColLen()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'avgColLen' is unset! Struct:" + toString()); + } + + if (!isSetNumNulls()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numNulls' is unset! Struct:" + toString()); + } + + if (!isSetNumDVs()) { + throw new org.apache.thrift.protocol.TProtocolException( + "Required field 'numDVs' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) + throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) + throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java + // serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol( + new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class StringColumnStatsDataStandardSchemeFactory implements + SchemeFactory { + public StringColumnStatsDataStandardScheme getScheme() { + return new StringColumnStatsDataStandardScheme(); + } + } + + private static class StringColumnStatsDataStandardScheme extends + StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, + StringColumnStatsData struct) + throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MAX_COL_LEN + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.maxColLen = iprot.readI64(); + struct.setMaxColLenIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 2: // AVG_COL_LEN + if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { + struct.avgColLen = iprot.readDouble(); + struct.setAvgColLenIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 3: // NUM_NULLS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + case 4: // NUM_DVS + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, + schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, + StringColumnStatsData struct) + throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(MAX_COL_LEN_FIELD_DESC); + oprot.writeI64(struct.maxColLen); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(AVG_COL_LEN_FIELD_DESC); + oprot.writeDouble(struct.avgColLen); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC); + oprot.writeI64(struct.numNulls); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(NUM_DVS_FIELD_DESC); + oprot.writeI64(struct.numDVs); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class StringColumnStatsDataTupleSchemeFactory implements + SchemeFactory { + 
public StringColumnStatsDataTupleScheme getScheme() { + return new StringColumnStatsDataTupleScheme(); + } + } + + private static class StringColumnStatsDataTupleScheme extends + TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, + StringColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.maxColLen); + oprot.writeDouble(struct.avgColLen); + oprot.writeI64(struct.numNulls); + oprot.writeI64(struct.numDVs); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, + StringColumnStatsData struct) + throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.maxColLen = iprot.readI64(); + struct.setMaxColLenIsSet(true); + struct.avgColLen = iprot.readDouble(); + struct.setAvgColLenIsSet(true); + struct.numNulls = iprot.readI64(); + struct.setNumNullsIsSet(true); + struct.numDVs = iprot.readI64(); + struct.setNumDVsIsSet(true); + } + } } - diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 32da869..f924fb6 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1229,6 +1229,8 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) return client.get_table_statistics_req( new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); } + + /** {@inheritDoc} */ @Override diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 5e2cad7..e6f0afa 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -135,7 +135,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; - /** * This class is the interface between the application logic and the database * store that contains the objects. Refrain putting any logic in mode.M* objects @@ -144,6555 +143,7154 @@ * filestore. 
*/ public class ObjectStore implements RawStore, Configurable { - private static Properties prop = null; - private static PersistenceManagerFactory pmf = null; - - private static Lock pmfPropLock = new ReentrantLock(); - private static final Log LOG = LogFactory.getLog(ObjectStore.class.getName()); - - private static enum TXN_STATUS { - NO_STATE, OPEN, COMMITED, ROLLBACK - } - - private static final Map PINCLASSMAP; - static { - Map map = new HashMap(); - map.put("table", MTable.class); - map.put("storagedescriptor", MStorageDescriptor.class); - map.put("serdeinfo", MSerDeInfo.class); - map.put("partition", MPartition.class); - map.put("database", MDatabase.class); - map.put("type", MType.class); - map.put("fieldschema", MFieldSchema.class); - map.put("order", MOrder.class); - PINCLASSMAP = Collections.unmodifiableMap(map); - } - - private boolean isInitialized = false; - private PersistenceManager pm = null; - private MetaStoreDirectSql directSql = null; - private PartitionExpressionProxy expressionProxy = null; - private Configuration hiveConf; - int openTrasactionCalls = 0; - private Transaction currentTransaction = null; - private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE; - private final AtomicBoolean isSchemaVerified = new AtomicBoolean(false); - - public ObjectStore() { - } - - @Override - public Configuration getConf() { - return hiveConf; - } - - /** - * Called whenever this object is instantiated using ReflectionUils, and also - * on connection retries. In cases of connection retries, conf will usually - * contain modified values. - */ - @Override - @SuppressWarnings("nls") - public void setConf(Configuration conf) { - // Although an instance of ObjectStore is accessed by one thread, there may - // be many threads with ObjectStore instances. So the static variables - // pmf and prop need to be protected with locks. - pmfPropLock.lock(); - try { - isInitialized = false; - hiveConf = conf; - Properties propsFromConf = getDataSourceProps(conf); - boolean propsChanged = !propsFromConf.equals(prop); - - if (propsChanged) { - pmf = null; - prop = null; - } - - assert(!isActiveTransaction()); - shutdown(); - // Always want to re-create pm as we don't know if it were created by the - // most recent instance of the pmf - pm = null; - directSql = null; - expressionProxy = null; - openTrasactionCalls = 0; - currentTransaction = null; - transactionStatus = TXN_STATUS.NO_STATE; - - initialize(propsFromConf); - - if (!isInitialized) { - throw new RuntimeException( - "Unable to create persistence manager. Check dss.log for details"); - } else { - LOG.info("Initialized ObjectStore"); - } - } finally { - pmfPropLock.unlock(); - } - } - - private ClassLoader classLoader; - { - classLoader = Thread.currentThread().getContextClassLoader(); - if (classLoader == null) { - classLoader = ObjectStore.class.getClassLoader(); - } - } - - @SuppressWarnings("nls") - private void initialize(Properties dsProps) { - LOG.info("ObjectStore, initialize called"); - prop = dsProps; - pm = getPersistenceManager(); - isInitialized = pm != null; - if (isInitialized) { - expressionProxy = createExpressionProxy(hiveConf); - directSql = new MetaStoreDirectSql(pm); - } - } - - /** - * Creates the proxy used to evaluate expressions. This is here to prevent circular - * dependency - ql -> metastore client <-> metastore server -> ql. If server and - * client are split, this can be removed. - * @param conf Configuration. - * @return The partition expression proxy. 
- */ - private static PartitionExpressionProxy createExpressionProxy(Configuration conf) { - String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); - try { - @SuppressWarnings("unchecked") - Class clazz = - (Class)MetaStoreUtils.getClass(className); - return MetaStoreUtils.newInstance( - clazz, new Class[0], new Object[0]); - } catch (MetaException e) { - LOG.error("Error loading PartitionExpressionProxy", e); - throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); - } - } - - /** - * Properties specified in hive-default.xml override the properties specified - * in jpox.properties. - */ - @SuppressWarnings("nls") - private static Properties getDataSourceProps(Configuration conf) { - Properties prop = new Properties(); - - Iterator> iter = conf.iterator(); - while (iter.hasNext()) { - Map.Entry e = iter.next(); - if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) { - Object prevVal = prop.setProperty(e.getKey(), conf.get(e.getKey())); - if (LOG.isDebugEnabled() - && !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { - LOG.debug("Overriding " + e.getKey() + " value " + prevVal - + " from jpox.properties with " + e.getValue()); - } - } - } - - if (LOG.isDebugEnabled()) { - for (Entry e : prop.entrySet()) { - if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { - LOG.debug(e.getKey() + " = " + e.getValue()); - } - } - } - return prop; - } - - private static synchronized PersistenceManagerFactory getPMF() { - if (pmf == null) { - pmf = JDOHelper.getPersistenceManagerFactory(prop); - DataStoreCache dsc = pmf.getDataStoreCache(); - if (dsc != null) { - HiveConf conf = new HiveConf(ObjectStore.class); - String objTypes = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES); - LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"" + objTypes + "\""); - if (objTypes != null && objTypes.length() > 0) { - objTypes = objTypes.toLowerCase(); - String[] typeTokens = objTypes.split(","); - for (String type : typeTokens) { - type = type.trim(); - if (PINCLASSMAP.containsKey(type)) { - dsc.pinAll(true, PINCLASSMAP.get(type)); - } - else { - LOG.warn(type + " is not one of the pinnable object types: " + org.apache.commons.lang.StringUtils.join(PINCLASSMAP.keySet(), " ")); - } - } - } - } else { - LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. 
Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes"); - } - } - return pmf; - } - - @InterfaceAudience.LimitedPrivate({"HCATALOG"}) - @InterfaceStability.Evolving - public PersistenceManager getPersistenceManager() { - return getPMF().getPersistenceManager(); - } - - @Override - public void shutdown() { - if (pm != null) { - pm.close(); - } - } - - /** - * Opens a new one or the one already created Every call of this function must - * have corresponding commit or rollback function call - * - * @return an active transaction - */ - - @Override - public boolean openTransaction() { - openTrasactionCalls++; - if (openTrasactionCalls == 1) { - currentTransaction = pm.currentTransaction(); - currentTransaction.begin(); - transactionStatus = TXN_STATUS.OPEN; - } else { - // something is wrong since openTransactionCalls is greater than 1 but - // currentTransaction is not active - assert ((currentTransaction != null) && (currentTransaction.isActive())); - } - - boolean result = currentTransaction.isActive(); - debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result); - return result; - } - - /** - * if this is the commit of the first open call then an actual commit is - * called. - * - * @return Always returns true - */ - @Override - @SuppressWarnings("nls") - public boolean commitTransaction() { - if (TXN_STATUS.ROLLBACK == transactionStatus) { - debugLog("Commit transaction: rollback"); - return false; - } - if (openTrasactionCalls <= 0) { - RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = " - + openTrasactionCalls + ". This probably indicates that there are unbalanced " + - "calls to openTransaction/commitTransaction"); - LOG.error(e); - throw e; - } - if (!currentTransaction.isActive()) { - RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = " - + openTrasactionCalls + ". This probably indicates that there are unbalanced " + - "calls to openTransaction/commitTransaction"); - LOG.error(e); - throw e; - } - openTrasactionCalls--; - debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive()); - - if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { - transactionStatus = TXN_STATUS.COMMITED; - currentTransaction.commit(); - } - - return true; - } - - /** - * @return true if there is an active transaction. 
If the current transaction - * is either committed or rolled back it returns false - */ - public boolean isActiveTransaction() { - if (currentTransaction == null) { - return false; - } - return currentTransaction.isActive(); - } - - /** - * Rolls back the current transaction if it is active - */ - @Override - public void rollbackTransaction() { - if (openTrasactionCalls < 1) { - debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls); - return; - } - openTrasactionCalls = 0; - debugLog("Rollback transaction, isActive: " + currentTransaction.isActive()); - if (currentTransaction.isActive() - && transactionStatus != TXN_STATUS.ROLLBACK) { - transactionStatus = TXN_STATUS.ROLLBACK; - // could already be rolled back - currentTransaction.rollback(); - // remove all detached objects from the cache, since the transaction is - // being rolled back they are no longer relevant, and this prevents them - // from reattaching in future transactions - pm.evictAll(); - } - } - - @Override - public void createDatabase(Database db) throws InvalidObjectException, MetaException { - boolean commited = false; - MDatabase mdb = new MDatabase(); - mdb.setName(db.getName().toLowerCase()); - mdb.setLocationUri(db.getLocationUri()); - mdb.setDescription(db.getDescription()); - mdb.setParameters(db.getParameters()); - mdb.setOwnerName(db.getOwnerName()); - PrincipalType ownerType = db.getOwnerType(); - mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name())); - try { - openTransaction(); - pm.makePersistent(mdb); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - } - - @SuppressWarnings("nls") - private MDatabase getMDatabase(String name) throws NoSuchObjectException { - MDatabase mdb = null; - boolean commited = false; - try { - openTransaction(); - name = name.toLowerCase().trim(); - Query query = pm.newQuery(MDatabase.class, "name == dbname"); - query.declareParameters("java.lang.String dbname"); - query.setUnique(true); - mdb = (MDatabase) query.execute(name); - pm.retrieve(mdb); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - if (mdb == null) { - throw new NoSuchObjectException("There is no database named " + name); - } - return mdb; - } - - @Override - public Database getDatabase(String name) throws NoSuchObjectException { - MDatabase mdb = null; - boolean commited = false; - try { - openTransaction(); - mdb = getMDatabase(name); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - Database db = new Database(); - db.setName(mdb.getName()); - db.setDescription(mdb.getDescription()); - db.setLocationUri(mdb.getLocationUri()); - db.setParameters(mdb.getParameters()); - db.setOwnerName(mdb.getOwnerName()); - String type = mdb.getOwnerType(); - db.setOwnerType((null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type)); - return db; - } - - /** - * Alter the database object in metastore. Currently only the parameters - * of the database or the owner can be changed. 
- * @param dbName the database name - * @param db the Hive Database object - * @throws MetaException - * @throws NoSuchObjectException - */ - @Override - public boolean alterDatabase(String dbName, Database db) - throws MetaException, NoSuchObjectException { - - MDatabase mdb = null; - boolean committed = false; - try { - mdb = getMDatabase(dbName); - mdb.setParameters(db.getParameters()); - mdb.setOwnerName(db.getOwnerName()); - if (db.getOwnerType() != null) { - mdb.setOwnerType(db.getOwnerType().name()); - } - openTransaction(); - pm.makePersistent(mdb); - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - return false; - } - } - return true; - } - - @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { - boolean success = false; - LOG.info("Dropping database " + dbname + " along with all tables"); - dbname = dbname.toLowerCase(); - try { - openTransaction(); - - // then drop the database - MDatabase db = getMDatabase(dbname); - pm.retrieve(db); - if (db != null) { - List dbGrants = this.listDatabaseGrants(dbname); - if (dbGrants != null && dbGrants.size() > 0) { - pm.deletePersistentAll(dbGrants); - } - pm.deletePersistent(db); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - - @Override - public List getDatabases(String pattern) throws MetaException { - boolean commited = false; - List databases = null; - try { - openTransaction(); - // Take the pattern and split it on the | to get all the composing - // patterns - String[] subpatterns = pattern.trim().split("\\|"); - String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where ("; - boolean first = true; - for (String subpattern : subpatterns) { - subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); - if (!first) { - query = query + " || "; - } - query = query + " name.matches(\"" + subpattern + "\")"; - first = false; - } - query = query + ")"; - - Query q = pm.newQuery(query); - q.setResult("name"); - q.setOrdering("name ascending"); - Collection names = (Collection) q.execute(); - databases = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - databases.add((String) i.next()); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return databases; - } - - @Override - public List getAllDatabases() throws MetaException { - return getDatabases(".*"); - } - - private MType getMType(Type type) { - List fields = new ArrayList(); - if (type.getFields() != null) { - for (FieldSchema field : type.getFields()) { - fields.add(new MFieldSchema(field.getName(), field.getType(), field - .getComment())); - } - } - return new MType(type.getName(), type.getType1(), type.getType2(), fields); - } - - private Type getType(MType mtype) { - List fields = new ArrayList(); - if (mtype.getFields() != null) { - for (MFieldSchema field : mtype.getFields()) { - fields.add(new FieldSchema(field.getName(), field.getType(), field - .getComment())); - } - } - Type ret = new Type(); - ret.setName(mtype.getName()); - ret.setType1(mtype.getType1()); - ret.setType2(mtype.getType2()); - ret.setFields(fields); - return ret; - } - - @Override - public boolean createType(Type type) { - boolean success = false; - MType mtype = getMType(type); - boolean commited = false; - try { - openTransaction(); - pm.makePersistent(mtype); - commited = commitTransaction(); - success = true; - } finally { - if 
(!commited) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public Type getType(String typeName) { - Type type = null; - boolean commited = false; - try { - openTransaction(); - Query query = pm.newQuery(MType.class, "name == typeName"); - query.declareParameters("java.lang.String typeName"); - query.setUnique(true); - MType mtype = (MType) query.execute(typeName.trim()); - pm.retrieve(type); - if (mtype != null) { - type = getType(mtype); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return type; - } - - @Override - public boolean dropType(String typeName) { - boolean success = false; - try { - openTransaction(); - Query query = pm.newQuery(MType.class, "name == typeName"); - query.declareParameters("java.lang.String typeName"); - query.setUnique(true); - MType type = (MType) query.execute(typeName.trim()); - pm.retrieve(type); - if (type != null) { - pm.deletePersistent(type); - } - success = commitTransaction(); - } catch (JDOObjectNotFoundException e) { - success = commitTransaction(); - LOG.debug("type not found " + typeName, e); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - boolean commited = false; - try { - openTransaction(); - MTable mtbl = convertToMTable(tbl); - pm.makePersistent(mtbl); - PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); - List toPersistPrivObjs = new ArrayList(); - if (principalPrivs != null) { - int now = (int)(System.currentTimeMillis()/1000); - - Map> userPrivs = principalPrivs.getUserPrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER); - - Map> groupPrivs = principalPrivs.getGroupPrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP); - - Map> rolePrivs = principalPrivs.getRolePrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE); - } - pm.makePersistentAll(toPersistPrivObjs); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - } - - /** - * Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of - * them to the toPersistPrivObjs. These privilege objects will be persisted as - * part of createTable. 
- * - * @param mtbl - * @param toPersistPrivObjs - * @param now - * @param privMap - * @param type - */ - private void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObjs, - int now, Map> privMap, PrincipalType type) { - if (privMap != null) { - for (Map.Entry> entry : privMap - .entrySet()) { - String principalName = entry.getKey(); - List privs = entry.getValue(); - for (int i = 0; i < privs.size(); i++) { - PrivilegeGrantInfo priv = privs.get(i); - if (priv == null) { - continue; - } - MTablePrivilege mTblSec = new MTablePrivilege( - principalName, type.toString(), mtbl, priv.getPrivilege(), - now, priv.getGrantor(), priv.getGrantorType().toString(), priv - .isGrantOption()); - toPersistPrivObjs.add(mTblSec); - } - } - } - } - - @Override - public boolean dropTable(String dbName, String tableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean success = false; - try { - openTransaction(); - MTable tbl = getMTable(dbName, tableName); - pm.retrieve(tbl); - if (tbl != null) { - // first remove all the grants - List tabGrants = listAllTableGrants(dbName, tableName); - if (tabGrants != null && tabGrants.size() > 0) { - pm.deletePersistentAll(tabGrants); - } - List tblColGrants = listTableAllColumnGrants(dbName, - tableName); - if (tblColGrants != null && tblColGrants.size() > 0) { - pm.deletePersistentAll(tblColGrants); - } - - List partGrants = this.listTableAllPartitionGrants(dbName, tableName); - if (partGrants != null && partGrants.size() > 0) { - pm.deletePersistentAll(partGrants); - } - - List partColGrants = listTableAllPartitionColumnGrants(dbName, - tableName); - if (partColGrants != null && partColGrants.size() > 0) { - pm.deletePersistentAll(partColGrants); - } - // delete column statistics if present - try { - deleteTableColumnStatistics(dbName, tableName, null); - } catch (NoSuchObjectException e) { - LOG.info("Found no table level column statistics associated with db " + dbName + - " table " + tableName + " record to delete"); - } - - preDropStorageDescriptor(tbl.getSd()); - // then remove the table - pm.deletePersistentAll(tbl); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public Table getTable(String dbName, String tableName) throws MetaException { - boolean commited = false; - Table tbl = null; - try { - openTransaction(); - tbl = convertToTable(getMTable(dbName, tableName)); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return tbl; - } - - @Override - public List getTables(String dbName, String pattern) - throws MetaException { - boolean commited = false; - List tbls = null; - try { - openTransaction(); - dbName = dbName.toLowerCase().trim(); - // Take the pattern and split it on the | to get all the composing - // patterns - String[] subpatterns = pattern.trim().split("\\|"); - String query = - "select tableName from org.apache.hadoop.hive.metastore.model.MTable " - + "where database.name == dbName && ("; - boolean first = true; - for (String subpattern : subpatterns) { - subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); - if (!first) { - query = query + " || "; - } - query = query + " tableName.matches(\"" + subpattern + "\")"; - first = false; - } - query = query + ")"; - - Query q = pm.newQuery(query); - q.declareParameters("java.lang.String dbName"); - q.setResult("tableName"); - q.setOrdering("tableName ascending"); - Collection names = 
(Collection) q.execute(dbName); - tbls = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - tbls.add((String) i.next()); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return tbls; - } - - @Override - public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, ".*"); - } - - private MTable getMTable(String db, String table) { - MTable mtbl = null; - boolean commited = false; - try { - openTransaction(); - db = db.toLowerCase().trim(); - table = table.toLowerCase().trim(); - Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); - query.setUnique(true); - mtbl = (MTable) query.execute(table, db); - pm.retrieve(mtbl); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return mtbl; - } - - @Override - public List getTableObjectsByName(String db, List tbl_names) - throws MetaException, UnknownDBException { - List tables = new ArrayList(); - boolean committed = false; - try { - openTransaction(); - - db = db.toLowerCase().trim(); - Query dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); - dbExistsQuery.declareParameters("java.lang.String db"); - dbExistsQuery.setUnique(true); - dbExistsQuery.setResult("name"); - String dbNameIfExists = (String) dbExistsQuery.execute(db); - if (dbNameIfExists == null || dbNameIfExists.isEmpty()) { - throw new UnknownDBException("Could not find database " + db); - } - - List lowered_tbl_names = new ArrayList(); - for (String t : tbl_names) { - lowered_tbl_names.add(t.toLowerCase().trim()); - } - Query query = pm.newQuery(MTable.class); - query.setFilter("database.name == db && tbl_names.contains(tableName)"); - query.declareParameters("java.lang.String db, java.util.Collection tbl_names"); - Collection mtables = (Collection) query.execute(db, lowered_tbl_names); - for (Iterator iter = mtables.iterator(); iter.hasNext();) { - tables.add(convertToTable((MTable) iter.next())); - } - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - } - } - return tables; - } - - /** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */ - private List convertList(List dnList) { - return (dnList == null) ? null : Lists.newArrayList(dnList); - } - - /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */ - private Map convertMap(Map dnMap) { - return (dnMap == null) ? null : Maps.newHashMap(dnMap); - } - - private Table convertToTable(MTable mtbl) throws MetaException { - if (mtbl == null) { - return null; - } - String tableType = mtbl.getTableType(); - if (tableType == null) { - // for backwards compatibility with old metastore persistence - if (mtbl.getViewOriginalText() != null) { - tableType = TableType.VIRTUAL_VIEW.toString(); - } else if ("TRUE".equals(mtbl.getParameters().get("EXTERNAL"))) { - tableType = TableType.EXTERNAL_TABLE.toString(); - } else { - tableType = TableType.MANAGED_TABLE.toString(); - } - } - return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl - .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl - .getRetention(), convertToStorageDescriptor(mtbl.getSd()), - convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), - mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); - } - - private MTable convertToMTable(Table tbl) throws InvalidObjectException, - MetaException { - if (tbl == null) { - return null; - } - MDatabase mdb = null; - try { - mdb = getMDatabase(tbl.getDbName()); - } catch (NoSuchObjectException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new InvalidObjectException("Database " + tbl.getDbName() - + " doesn't exist."); - } - - // If the table has property EXTERNAL set, update table type - // accordingly - String tableType = tbl.getTableType(); - boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL")); - if (TableType.MANAGED_TABLE.toString().equals(tableType)) { - if (isExternal) { - tableType = TableType.EXTERNAL_TABLE.toString(); - } - } - if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { - if (!isExternal) { - tableType = TableType.MANAGED_TABLE.toString(); - } - } - - // A new table is always created with a new column descriptor - return new MTable(tbl.getTableName().toLowerCase(), mdb, - convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl - .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), -
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), - tbl.getViewOriginalText(), tbl.getViewExpandedText(), - tableType); - } - - private List convertToMFieldSchemas(List keys) { - List mkeys = null; - if (keys != null) { - mkeys = new ArrayList(keys.size()); - for (FieldSchema part : keys) { - mkeys.add(new MFieldSchema(part.getName().toLowerCase(), - part.getType(), part.getComment())); - } - } - return mkeys; - } - - private List convertToFieldSchemas(List mkeys) { - List keys = null; - if (mkeys != null) { - keys = new ArrayList(mkeys.size()); - for (MFieldSchema part : mkeys) { - keys.add(new FieldSchema(part.getName(), part.getType(), part - .getComment())); - } - } - return keys; - } - - private List convertToMOrders(List keys) { - List mkeys = null; - if (keys != null) { - mkeys = new ArrayList(keys.size()); - for (Order part : keys) { - mkeys.add(new MOrder(part.getCol().toLowerCase(), part.getOrder())); - } - } - return mkeys; - } - - private List convertToOrders(List mkeys) { - List keys = null; - if (mkeys != null) { - keys = new ArrayList(mkeys.size()); - for (MOrder part : mkeys) { - keys.add(new Order(part.getCol(), part.getOrder())); - } - } - return keys; - } - - private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException { - if (ms == null) { - throw new MetaException("Invalid SerDeInfo object"); - } - return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters())); - } - - private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException { - if (ms == null) { - throw new MetaException("Invalid SerDeInfo object"); - } - return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms - .getParameters()); - } - - /** - * Given a list of model field schemas, create a new model column descriptor. - * @param cols the columns the column descriptor contains - * @return a new column descriptor db-backed object - */ - private MColumnDescriptor createNewMColumnDescriptor(List cols) { - if (cols == null) { - return null; - } - return new MColumnDescriptor(cols); - } - - // MSD and SD should be same objects. Not sure how to make then same right now - // MSerdeInfo *& SerdeInfo should be same as well - private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd, - boolean noFS) - throws MetaException { - if (msd == null) { - return null; - } - List mFieldSchemas = msd.getCD() == null ? null : msd.getCD().getCols(); - - StorageDescriptor sd = new StorageDescriptor(noFS ? 
null : convertToFieldSchemas(mFieldSchemas), - msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd - .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd - .getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd - .getSortCols()), convertMap(msd.getParameters())); - SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()), - convertToSkewedValues(msd.getSkewedColValues()), - covertToSkewedMap(msd.getSkewedColValueLocationMaps())); - sd.setSkewedInfo(skewedInfo); - sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories()); - return sd; - } - - private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) - throws MetaException { - return convertToStorageDescriptor(msd, false); - } - - /** - * Convert a list of MStringList to a list of list string - * - * @param mLists - * @return - */ - private List> convertToSkewedValues(List mLists) { - List> lists = null; - if (mLists != null) { - lists = new ArrayList>(mLists.size()); - for (MStringList element : mLists) { - lists.add(new ArrayList(element.getInternalList())); - } - } - return lists; - } - - private List convertToMStringLists(List> mLists) { - List lists = null ; - if (null != mLists) { - lists = new ArrayList(); - for (List mList : mLists) { - lists.add(new MStringList(mList)); - } - } - return lists; - } - - /** - * Convert a MStringList Map to a Map - * @param mMap - * @return - */ - private Map, String> covertToSkewedMap(Map mMap) { - Map, String> map = null; - if (mMap != null) { - map = new HashMap, String>(mMap.size()); - Set keys = mMap.keySet(); - for (MStringList key : keys) { - map.put(new ArrayList(key.getInternalList()), mMap.get(key)); - } - } - return map; - } - - /** - * Covert a Map to a MStringList Map - * @param mMap - * @return - */ - private Map covertToMapMStringList(Map, String> mMap) { - Map map = null; - if (mMap != null) { - map = new HashMap(mMap.size()); - Set> keys = mMap.keySet(); - for (List key : keys) { - map.put(new MStringList(key), mMap.get(key)); - } - } - return map; - } - - - - /** - * Converts a storage descriptor to a db-backed storage descriptor. Creates a - * new db-backed column descriptor object for this SD. - * @param sd the storage descriptor to wrap in a db-backed object - * @return the storage descriptor db-backed object - * @throws MetaException - */ - private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) - throws MetaException { - if (sd == null) { - return null; - } - MColumnDescriptor mcd = createNewMColumnDescriptor(convertToMFieldSchemas(sd.getCols())); - return convertToMStorageDescriptor(sd, mcd); - } - - /** - * Converts a storage descriptor to a db-backed storage descriptor. It points the - * storage descriptor's column descriptor to the one passed as an argument, - * so it does not create a new mcolumn descriptor object. - * @param sd the storage descriptor to wrap in a db-backed object - * @param mcd the db-backed column descriptor - * @return the db-backed storage descriptor object - * @throws MetaException - */ - private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd, - MColumnDescriptor mcd) throws MetaException { - if (sd == null) { - return null; - } - return new MStorageDescriptor(mcd, sd - .getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd - .isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd - .getSerdeInfo()), sd.getBucketCols(), - convertToMOrders(sd.getSortCols()), sd.getParameters(), - (null == sd.getSkewedInfo()) ? 
null - : sd.getSkewedInfo().getSkewedColNames(), - convertToMStringLists((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo() - .getSkewedColValues()), - covertToMapMStringList((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo() - .getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories()); - } - - @Override - public boolean addPartitions(String dbName, String tblName, List parts) - throws InvalidObjectException, MetaException { - boolean success = false; - openTransaction(); - try { - List tabGrants = null; - List tabColumnGrants = null; - MTable table = this.getMTable(dbName, tblName); - if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(dbName, tblName); - tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName); - } - List toPersist = new ArrayList(); - for (Partition part : parts) { - if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { - throw new MetaException("Partition does not belong to target table " - + dbName + "." + tblName + ": " + part); - } - MPartition mpart = convertToMPart(part, true); - toPersist.add(mpart); - int now = (int)(System.currentTimeMillis()/1000); - if (tabGrants != null) { - for (MTablePrivilege tab: tabGrants) { - toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(), - tab.getPrincipalType(), mpart, tab.getPrivilege(), now, - tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption())); - } - } - - if (tabColumnGrants != null) { - for (MTableColumnPrivilege col : tabColumnGrants) { - toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(), - col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), - now, col.getGrantor(), col.getGrantorType(), col.getGrantOption())); - } - } - } - if (toPersist.size() > 0) { - pm.makePersistentAll(toPersist); - } - - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public boolean addPartition(Partition part) throws InvalidObjectException, - MetaException { - boolean success = false; - boolean commited = false; - try { - MTable table = this.getMTable(part.getDbName(), part.getTableName()); - List tabGrants = null; - List tabColumnGrants = null; - if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(part - .getDbName(), part.getTableName()); - tabColumnGrants = this.listTableAllColumnGrants( - part.getDbName(), part.getTableName()); - } - openTransaction(); - MPartition mpart = convertToMPart(part, true); - pm.makePersistent(mpart); - - int now = (int)(System.currentTimeMillis()/1000); - List toPersist = new ArrayList(); - if (tabGrants != null) { - for (MTablePrivilege tab: tabGrants) { - MPartitionPrivilege partGrant = new MPartitionPrivilege(tab - .getPrincipalName(), tab.getPrincipalType(), - mpart, tab.getPrivilege(), now, tab.getGrantor(), tab - .getGrantorType(), tab.getGrantOption()); - toPersist.add(partGrant); - } - } - - if (tabColumnGrants != null) { - for (MTableColumnPrivilege col : tabColumnGrants) { - MPartitionColumnPrivilege partColumn = new MPartitionColumnPrivilege(col - .getPrincipalName(), col.getPrincipalType(), mpart, col - .getColumnName(), col.getPrivilege(), now, col.getGrantor(), col - .getGrantorType(), col.getGrantOption()); - toPersist.add(partColumn); - } - - if (toPersist.size() > 0) { - pm.makePersistentAll(toPersist); - } - } - - commited = commitTransaction(); - success = true; 
- } finally { - if (!commited) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public Partition getPartition(String dbName, String tableName, - List part_vals) throws NoSuchObjectException, MetaException { - openTransaction(); - Partition part = convertToPart(getMPartition(dbName, tableName, part_vals)); - commitTransaction(); - if(part == null) { - throw new NoSuchObjectException("partition values=" - + part_vals.toString()); - } - part.setValues(part_vals); - return part; - } - - private MPartition getMPartition(String dbName, String tableName, - List part_vals) throws MetaException { - MPartition mpart = null; - boolean commited = false; - try { - openTransaction(); - dbName = dbName.toLowerCase().trim(); - tableName = tableName.toLowerCase().trim(); - MTable mtbl = getMTable(dbName, tableName); - if (mtbl == null) { - commited = commitTransaction(); - return null; - } - // Change the query to use part_vals instead of the name which is - // redundant TODO: callers of this often get part_vals out of name for no reason... - String name = Warehouse.makePartName(convertToFieldSchemas(mtbl - .getPartitionKeys()), part_vals); - Query query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - mpart = (MPartition) query.execute(tableName, dbName, name); - pm.retrieve(mpart); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return mpart; - } - - /** - * Convert a Partition object into an MPartition, which is an object backed by the db - * If the Partition's set of columns is the same as the parent table's AND useTableCD - * is true, then this partition's storage descriptor's column descriptor will point - * to the same one as the table's storage descriptor. - * @param part the partition to convert - * @param useTableCD whether to try to use the parent table's column descriptor. - * @return the model partition object - * @throws InvalidObjectException - * @throws MetaException - */ - private MPartition convertToMPart(Partition part, boolean useTableCD) - throws InvalidObjectException, MetaException { - if (part == null) { - return null; - } - MTable mt = getMTable(part.getDbName(), part.getTableName()); - if (mt == null) { - throw new InvalidObjectException( - "Partition doesn't have a valid table or database name"); - } - - // If this partition's set of columns is the same as the parent table's, - // use the parent table's, so we do not create a duplicate column descriptor, - // thereby saving space - MStorageDescriptor msd; - if (useTableCD && - mt.getSd() != null && mt.getSd().getCD() != null && - mt.getSd().getCD().getCols() != null && - part.getSd() != null && - convertToFieldSchemas(mt.getSd().getCD().getCols()). 
- equals(part.getSd().getCols())) { - msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD()); - } else { - msd = convertToMStorageDescriptor(part.getSd()); - } - - return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt - .getPartitionKeys()), part.getValues()), mt, part.getValues(), part - .getCreateTime(), part.getLastAccessTime(), - msd, part.getParameters()); - } - - private Partition convertToPart(MPartition mpart) throws MetaException { - if (mpart == null) { - return null; - } - return new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() - .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), - mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), - convertMap(mpart.getParameters())); - } - - private Partition convertToPart(String dbName, String tblName, MPartition mpart) - throws MetaException { - if (mpart == null) { - return null; - } - return new Partition(convertList(mpart.getValues()), dbName, tblName, - mpart.getCreateTime(), mpart.getLastAccessTime(), - convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters())); - } - - @Override - public boolean dropPartition(String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, - InvalidInputException { - boolean success = false; - try { - openTransaction(); - MPartition part = getMPartition(dbName, tableName, part_vals); - dropPartitionCommon(part); - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public void dropPartitions(String dbName, String tblName, List partNames) - throws MetaException, NoSuchObjectException { - if (partNames.isEmpty()) return; - boolean success = false; - openTransaction(); - try { - // Delete all things. - dropPartitionGrantsNoTxn(dbName, tblName, partNames); - dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames); - dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames); - - // CDs are reused; go thry partition SDs, detach all CDs from SDs, then remove unused CDs. - for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) { - removeUnusedColumnDescriptor(mcd); - } - dropPartitionsNoTxn(dbName, tblName, partNames); - if (!(success = commitTransaction())) { - throw new MetaException("Failed to drop partitions"); // Should not happen? - } - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - /** - * Drop an MPartition and cascade deletes (e.g., delete partition privilege grants, - * drop the storage descriptor cleanly, etc.) 
- * @param part - the MPartition to drop - * @return whether the transaction committed successfully - * @throws InvalidInputException - * @throws InvalidObjectException - * @throws MetaException - * @throws NoSuchObjectException - */ - private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException, - InvalidObjectException, InvalidInputException { - boolean success = false; - try { - openTransaction(); - if (part != null) { - List schemas = part.getTable().getPartitionKeys(); - List colNames = new ArrayList(); - for (MFieldSchema col: schemas) { - colNames.add(col.getName()); - } - String partName = FileUtils.makePartName(colNames, part.getValues()); - - List partGrants = listPartitionGrants( - part.getTable().getDatabase().getName(), - part.getTable().getTableName(), - Lists.newArrayList(partName)); - - if (partGrants != null && partGrants.size() > 0) { - pm.deletePersistentAll(partGrants); - } - - List partColumnGrants = listPartitionAllColumnGrants( - part.getTable().getDatabase().getName(), - part.getTable().getTableName(), - Lists.newArrayList(partName)); - if (partColumnGrants != null && partColumnGrants.size() > 0) { - pm.deletePersistentAll(partColumnGrants); - } - - String dbName = part.getTable().getDatabase().getName(); - String tableName = part.getTable().getTableName(); - - // delete partition level column stats if it exists - try { - deletePartitionColumnStatistics(dbName, tableName, partName, part.getValues(), null); - } catch (NoSuchObjectException e) { - LOG.info("No column statistics records found to delete"); - } - - preDropStorageDescriptor(part.getSd()); - pm.deletePersistent(part); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public List getPartitions( - String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsInternal(dbName, tableName, maxParts, true, true); - } - - protected List getPartitionsInternal( - String dbName, String tblName, final int maxParts, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - Integer max = (maxParts < 0) ? 
null : maxParts; - return directSql.getPartitions(dbName, tblName, max); - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - return convertToParts(listMPartitions(dbName, tblName, maxParts)); - } - }.run(false); - } - - @Override - public List getPartitionsWithAuth(String dbName, String tblName, - short max, String userName, List groupNames) - throws MetaException, NoSuchObjectException, InvalidObjectException { - boolean success = false; - try { - openTransaction(); - List mparts = listMPartitions(dbName, tblName, max); - List parts = new ArrayList(mparts.size()); - if (mparts != null && mparts.size()>0) { - for (MPartition mpart : mparts) { - MTable mtbl = mpart.getTable(); - Partition part = convertToPart(mpart); - parts.add(part); - - if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl - .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, - tblName, partName, userName, groupNames); - part.setPrivileges(partAuth); - } - } - } - success = commitTransaction(); - return parts; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public Partition getPartitionWithAuth(String dbName, String tblName, - List partVals, String user_name, List group_names) - throws NoSuchObjectException, MetaException, InvalidObjectException { - boolean success = false; - try { - openTransaction(); - MPartition mpart = getMPartition(dbName, tblName, partVals); - if (mpart == null) { - commitTransaction(); - throw new NoSuchObjectException("partition values=" - + partVals.toString()); - } - Partition part = null; - MTable mtbl = mpart.getTable(); - part = convertToPart(mpart); - if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl - .getPartitionKeys()), partVals); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, - tblName, partName, user_name, group_names); - part.setPrivileges(partAuth); - } - - success = commitTransaction(); - return part; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - - private List convertToParts(List mparts) throws MetaException { - return convertToParts(mparts, null); - } - - private List convertToParts(List src, List dest) - throws MetaException { - if (src == null) { - return dest; - } - if (dest == null) { - dest = new ArrayList(src.size()); - } - for (MPartition mp : src) { - dest.add(convertToPart(mp)); - } - return dest; - } - - private List convertToParts(String dbName, String tblName, List mparts) - throws MetaException { - List parts = new ArrayList(mparts.size()); - for (MPartition mp : mparts) { - parts.add(convertToPart(dbName, tblName, mp)); - } - return parts; - } - - // TODO:pc implement max - @Override - public List listPartitionNames(String dbName, String tableName, - short max) throws MetaException { - List pns = null; - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing getPartitionNames"); - pns = getPartitionNamesNoTxn(dbName, tableName, max); - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return pns; - } - - private List getPartitionNamesNoTxn(String dbName, String tableName, short max) { - List pns = new ArrayList(); - dbName = dbName.toLowerCase().trim(); - tableName = 
tableName.toLowerCase().trim(); - Query q = pm.newQuery( - "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 " - + "order by partitionName asc"); - q.declareParameters("java.lang.String t1, java.lang.String t2"); - q.setResult("partitionName"); - - if(max > 0) { - q.setRange(0, max); - } - Collection names = (Collection) q.execute(dbName, tableName); - for (Iterator i = names.iterator(); i.hasNext();) { - pns.add((String) i.next()); - } - return pns; - } - - /** - * Retrieves a Collection of partition-related results from the database that match - * the partial specification given for a specific table. - * @param dbName the name of the database - * @param tableName the name of the table - * @param part_vals the partial specification values - * @param max_parts the maximum number of partitions to return - * @param resultsCol the metadata column of the data to return, e.g. partitionName, etc. - * if resultsCol is empty or null, a collection of MPartition objects is returned - * @throws NoSuchObjectException - * @results A Collection of partition-related items from the db that match the partial spec - * for a table. The type of each item in the collection corresponds to the column - * you want results for. E.g., if resultsCol is partitionName, the Collection - * has types of String, and if resultsCol is null, the types are MPartition. - */ - private Collection getPartitionPsQueryResults(String dbName, String tableName, - List part_vals, short max_parts, String resultsCol) - throws MetaException, NoSuchObjectException { - dbName = dbName.toLowerCase().trim(); - tableName = tableName.toLowerCase().trim(); - Table table = getTable(dbName, tableName); - - if (table == null) { - throw new NoSuchObjectException(dbName + "." + tableName + " table not found"); - } - - List partCols = table.getPartitionKeys(); - int numPartKeys = partCols.size(); - if (part_vals.size() > numPartKeys) { - throw new MetaException("Incorrect number of partition values"); - } - - partCols = partCols.subList(0, part_vals.size()); - //Construct a pattern of the form: partKey=partVal/partKey2=partVal2/... - // where partVal is either the escaped partition value given as input, - // or a regex of the form ".*" - //This works because the "=" and "/" separating key names and partition key/values - // are not escaped. - String partNameMatcher = Warehouse.makePartName(partCols, part_vals, ".*"); - //add ".*" to the regex to match anything else afterwards the partial spec. 
- if (part_vals.size() < numPartKeys) { - partNameMatcher += ".*"; - } - - Query q = pm.newQuery(MPartition.class); - StringBuilder queryFilter = new StringBuilder("table.database.name == dbName"); - queryFilter.append(" && table.tableName == tableName"); - queryFilter.append(" && partitionName.matches(partialRegex)"); - q.setFilter(queryFilter.toString()); - q.declareParameters("java.lang.String dbName, " + - "java.lang.String tableName, java.lang.String partialRegex"); - - if( max_parts >= 0 ) { - //User specified a row limit, set it on the Query - q.setRange(0, max_parts); - } - if (resultsCol != null && !resultsCol.isEmpty()) { - q.setResult(resultsCol); - } - - return (Collection) q.execute(dbName, tableName, partNameMatcher); - } - - @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) - throws MetaException, InvalidObjectException, NoSuchObjectException { - List partitions = new ArrayList(); - boolean success = false; - try { - openTransaction(); - LOG.debug("executing listPartitionNamesPsWithAuth"); - Collection parts = getPartitionPsQueryResults(db_name, tbl_name, - part_vals, max_parts, null); - MTable mtbl = getMTable(db_name, tbl_name); - for (Object o : parts) { - Partition part = convertToPart((MPartition) o); - //set auth privileges - if (null != userName && null != groupNames && - "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl - .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(db_name, - tbl_name, partName, userName, groupNames); - part.setPrivileges(partAuth); - } - partitions.add(part); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return partitions; - } - - @Override - public List listPartitionNamesPs(String dbName, String tableName, - List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List partitionNames = new ArrayList(); - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPartitionNamesPs"); - Collection names = getPartitionPsQueryResults(dbName, tableName, - part_vals, max_parts, "partitionName"); - for (Object o : names) { - partitionNames.add((String) o); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return partitionNames; - } - - // TODO:pc implement max - private List listMPartitions(String dbName, String tableName, - int max) { - - boolean success = false; - List mparts = null; - try { - openTransaction(); - LOG.debug("Executing listMPartitions"); - dbName = dbName.toLowerCase().trim(); - tableName = tableName.toLowerCase().trim(); - Query query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - query.setOrdering("partitionName ascending"); - if(max > 0) { - query.setRange(0, max); - } - mparts = (List) query.execute(tableName, dbName); - LOG.debug("Done executing query for listMPartitions"); - pm.retrieveAll(mparts); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMPartitions " + mparts); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mparts; - } - - @Override - public List getPartitionsByNames(String dbName, String tblName, - List partNames) throws MetaException, 
NoSuchObjectException { - return getPartitionsByNamesInternal(dbName, tblName, partNames, true, true); - } - - protected List getPartitionsByNamesInternal(String dbName, String tblName, - final List partNames, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null); - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(dbName, tblName, partNames); - } - }.run(false); - } - - @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { - return getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); - } - - protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr, - final String defaultPartitionName, final short maxParts, List result, - boolean allowSql, boolean allowJdo) throws TException { - assert result != null; - - // We will try pushdown first, so make the filter. This will also validate the expression, - // if serialization fails we will throw incompatible metastore error to the client. - String filter = null; - try { - filter = expressionProxy.convertExprToFilter(expr); - } catch (MetaException ex) { - throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); - } - - // Make a tree out of the filter. - // TODO: this is all pretty ugly. The only reason we need all these transformations - // is to maintain support for simple filters for HCat users that query metastore. - // If forcing everyone to use thick client is out of the question, maybe we could - // parse the filter into standard hive expressions and not all this separate tree - // Filter.g stuff. That way this method and ...ByFilter would just be merged. - final ExpressionTree exprTree = makeExpressionTree(filter); - - final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); - result.addAll(new GetListHelper(dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - // If we have some sort of expression tree, try SQL filter pushdown. - List result = null; - if (exprTree != null) { - result = directSql.getPartitionsViaSqlFilter(ctx.getTable(), exprTree, null); - } - if (result == null) { - // We couldn't do SQL filter pushdown. Get names via normal means. - List partNames = new LinkedList(); - hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( - ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - result = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null); - } - return result; - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - // If we have some sort of expression tree, try JDOQL filter pushdown. - List result = null; - if (exprTree != null) { - result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false); - } - if (result == null) { - // We couldn't do JDOQL filter pushdown. Get names via normal means. 
- List partNames = new ArrayList(); - hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( - ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - result = getPartitionsViaOrmFilter(dbName, tblName, partNames); - } - return result; - } - }.run(true)); - return hasUnknownPartitions.get(); - } - - private class LikeChecker extends ExpressionTree.TreeVisitor { - private boolean hasLike; - - public boolean hasLike() { - return hasLike; - } - - @Override - protected boolean shouldStop() { - return hasLike; - } - - @Override - protected void visit(LeafNode node) throws MetaException { - hasLike = hasLike || (node.operator == Operator.LIKE); - } - } - - /** - * Makes expression tree out of expr. - * @param filter Filter. - * @return Expression tree. Null if there was an error. - */ - private ExpressionTree makeExpressionTree(String filter) throws MetaException { - // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g. - if (filter == null || filter.isEmpty()) { - return ExpressionTree.EMPTY_TREE; - } - LOG.debug("Filter specified is " + filter); - ExpressionTree tree = null; - try { - tree = getFilterParser(filter).tree; - } catch (MetaException ex) { - LOG.info("Unable to make the expression tree from expression string [" - + filter + "]" + ex.getMessage()); // Don't log the stack, this is normal. - } - if (tree == null) { - return null; - } - // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here. - LikeChecker lc = new LikeChecker(); - tree.accept(lc); - return lc.hasLike() ? null : tree; - } - - /** - * Gets the partition names from a table, pruned using an expression. - * @param table Table. - * @param expr Expression. - * @param defaultPartName Default partition name from job config, if any. - * @param maxParts Maximum number of partition names to return. - * @param result The resulting names. - * @return Whether the result contains any unknown partitions. - */ - private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, - String defaultPartName, short maxParts, List result) throws MetaException { - result.addAll(getPartitionNamesNoTxn( - table.getDbName(), table.getTableName(), maxParts)); - List columnNames = new ArrayList(); - for (FieldSchema fs : table.getPartitionKeys()) { - columnNames.add(fs.getName()); - } - if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); - } - return expressionProxy.filterPartitionsByExpr( - columnNames, expr, defaultPartName, result); - } - - /** - * Gets partition names from the table via ORM (JDOQL) filter pushdown. - * @param table The table. - * @param tree The expression tree from which JDOQL filter will be made. - * @param maxParts Maximum number of partitions to return. - * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client - * (old hive client or non-hive one); if it was and we fail to create a filter, we will throw. - * @return Resulting partitions. Can be null if isValidatedFilter is false, and - * there was error deriving the JDO filter. 
- */ - private List getPartitionsViaOrmFilter(Table table, ExpressionTree tree, - short maxParts, boolean isValidatedFilter) throws MetaException { - Map params = new HashMap(); - String jdoFilter = makeQueryFilterString( - table.getDbName(), table, tree, params, isValidatedFilter); - if (jdoFilter == null) { - assert !isValidatedFilter; - return null; - } - Query query = pm.newQuery(MPartition.class, jdoFilter); - if (maxParts >= 0) { - // User specified a row limit, set it on the Query - query.setRange(0, maxParts); - } - - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - - @SuppressWarnings("unchecked") - List mparts = (List) query.executeWithMap(params); - - LOG.debug("Done executing query for getPartitionsViaOrmFilter"); - pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names? - LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter"); - List results = convertToParts(mparts); - query.closeAll(); - return results; - } - - private static class Out { - public T val; - } - - /** - * Gets partition names from the table via ORM (JDOQL) name filter. - * @param dbName Database name. - * @param tblName Table name. - * @param partNames Partition names to get the objects for. - * @return Resulting partitions. - */ - private List getPartitionsViaOrmFilter( - String dbName, String tblName, List partNames) throws MetaException { - if (partNames.isEmpty()) { - return new ArrayList(); - } - Out query = new Out(); - List mparts = null; - try { - mparts = getMPartitionsViaOrmFilter(dbName, tblName, partNames, query); - return convertToParts(dbName, tblName, mparts); - } finally { - if (query.val != null) { - query.val.closeAll(); - } - } - } - - private void dropPartitionsNoTxn(String dbName, String tblName, List partNames) { - ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); - Query query = queryWithParams.getFirst(); - query.setClass(MPartition.class); - long deleted = query.deletePersistentAll(queryWithParams.getSecond()); - LOG.debug("Deleted " + deleted + " partition from store"); - query.closeAll(); - } - - /** - * Detaches column descriptors from storage descriptors; returns the set of unique CDs - * thus detached. This is done before dropping partitions because CDs are reused between - * SDs; so, we remove the links to delete SDs and then check the returned CDs to see if - * they are referenced by other SDs. 
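The Out holder introduced above lets getMPartitionsViaOrmFilter hand the live Query back to its caller, which converts the MPartition results and only then closes the query in a finally block. Below is a small, generic sketch of that out-parameter idiom, using a plain AutoCloseable in place of a JDO Query; the class names are illustrative, not part of the metastore.

// Sketch of the out-parameter idiom: the callee exposes the resource it allocated
// through a mutable holder so the caller can release it after consuming the results.
public class OutHolderSketch {

  static class Out<T> {
    public T val;
  }

  /** Illustrative resource standing in for a javax.jdo.Query. */
  static class Resource implements AutoCloseable {
    java.util.List<String> run() { return java.util.List.of("part=a", "part=b"); }
    @Override public void close() { System.out.println("resource closed"); }
  }

  static java.util.List<String> fetch(Out<Resource> out) {
    Resource r = out.val = new Resource();   // caller now owns the cleanup
    return r.run();                          // results may still need conversion before cleanup
  }

  public static void main(String[] args) {
    Out<Resource> out = new Out<>();
    try {
      System.out.println(fetch(out));
    } finally {
      if (out.val != null) {
        out.val.close();                     // mirrors query.val.closeAll() in the code above
      }
    }
  }
}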
- */ - private HashSet detachCdsFromSdsNoTxn( - String dbName, String tblName, List partNames) { - ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); - Query query = queryWithParams.getFirst(); - query.setClass(MPartition.class); - query.setResult("sd"); - @SuppressWarnings("unchecked") - List sds = (List)query.executeWithMap( - queryWithParams.getSecond()); - HashSet candidateCds = new HashSet(); - for (MStorageDescriptor sd : sds) { - if (sd != null && sd.getCD() != null) { - candidateCds.add(sd.getCD()); - sd.setCD(null); - } - } - return candidateCds; - } - - private List getMPartitionsViaOrmFilter(String dbName, - String tblName, List partNames, Out out) { - ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); - Query query = out.val = queryWithParams.getFirst(); - query.setResultClass(MPartition.class); - query.setClass(MPartition.class); - query.setOrdering("partitionName ascending"); - - @SuppressWarnings("unchecked") - List result = (List)query.executeWithMap(queryWithParams.getSecond()); - return result; - } - - private ObjectPair> getPartQueryWithParams( - String dbName, String tblName, List partNames) { - StringBuilder sb = new StringBuilder( - "table.tableName == t1 && table.database.name == t2 && ("); - int n = 0; - Map params = new HashMap(); - for (Iterator itr = partNames.iterator(); itr.hasNext();) { - String pn = "p" + n; - n++; - String part = itr.next(); - params.put(pn, part); - sb.append("partitionName == ").append(pn); - sb.append(" || "); - } - sb.setLength(sb.length() - 4); // remove the last " || " - sb.append(')'); - - Query query = pm.newQuery(); - query.setFilter(sb.toString()); - - LOG.debug(" JDOQL filter is " + sb.toString()); - params.put("t1", tblName.trim().toLowerCase()); - params.put("t2", dbName.trim().toLowerCase()); - - query.declareParameters(makeParameterDeclarationString(params)); - return new ObjectPair>(query, params); - } - - @Override - public List getPartitionsByFilter(String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, true, true); - } - - /** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. */ - private abstract class GetHelper { - private final boolean isInTxn, doTrace, allowJdo; - private boolean doUseDirectSql; - private long start; - private Table table; - protected final String dbName, tblName; - private boolean success = false; - protected T results = null; - - public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJdo) - throws MetaException { - assert allowSql || allowJdo; - this.allowJdo = allowJdo; - this.dbName = dbName.toLowerCase(); - this.tblName = tblName.toLowerCase(); - this.doTrace = LOG.isDebugEnabled(); - this.isInTxn = isActiveTransaction(); - - // SQL usage inside a larger transaction (e.g. droptable) may not be desirable because - // some databases (e.g. Postgres) abort the entire transaction when any query fails, so - // the fallback from failed SQL to JDO is not possible. - boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL) - && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); - if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { - throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken. 
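getPartQueryWithParams above assembles a single JDOQL filter that ORs one equality test per requested partition name, binding each name through a generated parameter (p0, p1, ...). The standalone sketch below reproduces just that string-and-parameter construction, without a PersistenceManager; it mirrors the filter shape in the code above.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Builds the JDOQL filter string and parameter map used to fetch partitions by name.
public class PartNameFilterSketch {

  static String buildFilter(List<String> partNames, Map<String, String> params,
                            String dbName, String tblName) {
    StringBuilder sb =
        new StringBuilder("table.tableName == t1 && table.database.name == t2 && (");
    int n = 0;
    for (String part : partNames) {
      String pn = "p" + n++;
      params.put(pn, part);
      sb.append("partitionName == ").append(pn).append(" || ");
    }
    sb.setLength(sb.length() - 4);           // remove the trailing " || " (callers skip empty lists)
    sb.append(')');
    params.put("t1", tblName.trim().toLowerCase());
    params.put("t2", dbName.trim().toLowerCase());
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, String> params = new LinkedHashMap<>();
    String filter = buildFilter(List.of("ds=2013-01-01", "ds=2013-01-02"),
        params, "default", "Page_View");
    System.out.println(filter);
    // table.tableName == t1 && table.database.name == t2 && (partitionName == p0 || partitionName == p1)
    System.out.println(params);
    // {p0=ds=2013-01-01, p1=ds=2013-01-02, t1=page_view, t2=default}
  }
}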
- } - this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore(); - } - - protected abstract String describeResult(); - protected abstract T getSqlResult(GetHelper ctx) throws MetaException; - protected abstract T getJdoResult( - GetHelper ctx) throws MetaException, NoSuchObjectException; - - public T run(boolean initTable) throws MetaException, NoSuchObjectException { - try { - start(initTable); - if (doUseDirectSql) { - try { - setResult(getSqlResult(this)); - } catch (Exception ex) { - handleDirectSqlError(ex); - } - } - if (!doUseDirectSql) { - setResult(getJdoResult(this)); - } - return commit(); - } catch (NoSuchObjectException ex) { - throw ex; - } catch (MetaException ex) { - throw ex; - } catch (Exception ex) { - LOG.error("", ex); - throw new MetaException(ex.getMessage()); - } finally { - close(); - } - } - - private void start(boolean initTable) throws MetaException, NoSuchObjectException { - start = doTrace ? System.nanoTime() : 0; - openTransaction(); - if (initTable) { - table = ensureGetTable(dbName, tblName); - } - } - - private boolean setResult(T results) { - this.results = results; - return this.results != null; - } - - private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException { - LOG.error("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex); - if (!allowJdo) { - if (ex instanceof MetaException) { - throw (MetaException)ex; - } - throw new MetaException(ex.getMessage()); - } - if (!isInTxn) { - rollbackTransaction(); - start = doTrace ? System.nanoTime() : 0; - openTransaction(); - if (table != null) { - table = ensureGetTable(dbName, tblName); - } - } else { - start = doTrace ? System.nanoTime() : 0; - } - doUseDirectSql = false; - } - - public void disableDirectSql() { - this.doUseDirectSql = false; - } - - private T commit() { - success = commitTransaction(); - if (doTrace) { - LOG.debug(describeResult() + " retrieved using " + (doUseDirectSql ? "SQL" : "ORM") - + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms"); - } - return results; - } - - private void close() { - if (!success) { - rollbackTransaction(); - } - } - - public Table getTable() { - return table; - } - } - - private abstract class GetListHelper extends GetHelper> { - public GetListHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); - } - - @Override - protected String describeResult() { - return results.size() + " entries"; - } - } - - private abstract class GetStatHelper extends GetHelper { - public GetStatHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); - } - - @Override - protected String describeResult() { - return "statistics for " + (results == null ? 0 : results.getStatsObjSize()) + " columns"; - } - } - - protected List getPartitionsByFilterInternal(String dbName, String tblName, - String filter, final short maxParts, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { - final ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - List parts = directSql.getPartitionsViaSqlFilter( - ctx.getTable(), tree, (maxParts < 0) ? 
null : (int)maxParts); - if (parts == null) { - // Cannot push down SQL filter. The message has been logged internally. - // This is not an error so don't roll back, just go to JDO. - ctx.disableDirectSql(); - } - return parts; - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(ctx.getTable(), tree, maxParts, true); - } - }.run(true); - } - - /** - * Gets the table object for a given table, throws if anything goes wrong. - * @param dbName Database name. - * @param tblName Table name. - * @return Table object. - */ - private MTable ensureGetMTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - MTable mtable = getMTable(dbName, tblName); - if (mtable == null) { - throw new NoSuchObjectException("Specified database/table does not exist : " - + dbName + "." + tblName); - } - return mtable; - } - - private Table ensureGetTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - return convertToTable(ensureGetMTable(dbName, tblName)); - } - - private FilterParser getFilterParser(String filter) throws MetaException { - FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter)); - CommonTokenStream tokens = new CommonTokenStream(lexer); - - FilterParser parser = new FilterParser(tokens); - try { - parser.filter(); - } catch(RecognitionException re) { - throw new MetaException("Error parsing partition filter; lexer error: " - + lexer.errorMsg + "; exception " + re); - } - - if (lexer.errorMsg != null) { - throw new MetaException("Error parsing partition filter : " + lexer.errorMsg); - } - return parser; - } - - /** - * Makes a JDO query filter string. - * Makes a JDO query filter string for tables or partitions. - * @param dbName Database name. - * @param table Table. If null, the query returned is over tables in a database. - * If not null, the query returned is over partitions in a table. - * @param filter The filter from which JDOQL filter will be made. - * @param params Parameters for the filter. Some parameters may be added here. - * @return Resulting filter. - */ - private String makeQueryFilterString(String dbName, MTable mtable, String filter, - Map params) throws MetaException { - ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true); - } - - /** - * Makes a JDO query filter string for tables or partitions. - * @param dbName Database name. - * @param table Table. If null, the query returned is over tables in a database. - * If not null, the query returned is over partitions in a table. - * @param tree The expression tree from which JDOQL filter will be made. - * @param params Parameters for the filter. Some parameters may be added here. - * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown - * by the client; if it was and we fail to create a filter, we will throw. - * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error. 
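GetHelper above is essentially a template method: run() tries the direct-SQL implementation when it is enabled and the datastore is compatible, and on failure logs, rolls back, and retries with the JDO implementation, or rethrows when JDO is not allowed. The stripped-down sketch below keeps only that try-fast-path-then-fall-back control flow; transactions, tracing and the table lookup are omitted, and the supplier arguments are illustrative.

import java.util.List;
import java.util.concurrent.Callable;

// Minimal analogue of GetHelper.run(): prefer the direct path, fall back to the safe one.
public class FallbackHelperSketch<T> {

  private final boolean allowSql;
  private final boolean allowJdo;

  FallbackHelperSketch(boolean allowSql, boolean allowJdo) {
    assert allowSql || allowJdo;             // at least one path must be available
    this.allowSql = allowSql;
    this.allowJdo = allowJdo;
  }

  T run(Callable<T> sqlPath, Callable<T> jdoPath) throws Exception {
    if (allowSql) {
      try {
        return sqlPath.call();               // direct SQL attempt
      } catch (Exception ex) {
        if (!allowJdo) {
          throw ex;                          // no fallback permitted: surface the error
        }
        // The real helper rolls back and reopens the transaction here before retrying.
      }
    }
    return jdoPath.call();                   // ORM fallback (or the only allowed path)
  }

  public static void main(String[] args) throws Exception {
    FallbackHelperSketch<List<String>> helper = new FallbackHelperSketch<>(true, true);
    List<String> parts = helper.run(
        () -> { throw new RuntimeException("direct SQL failed"); },
        () -> List.of("ds=2013-01-01"));
    System.out.println(parts);               // [ds=2013-01-01] via the fallback supplier
  }
}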
- */ - private String makeQueryFilterString(String dbName, Table table, ExpressionTree tree, - Map params, boolean isValidatedFilter) throws MetaException { - assert tree != null; - FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); - if (table != null) { - queryBuilder.append("table.tableName == t1 && table.database.name == t2"); - params.put("t1", table.getTableName()); - params.put("t2", table.getDbName()); - } else { - queryBuilder.append("database.name == dbName"); - params.put("dbName", dbName); - } - - tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder); - if (queryBuilder.hasError()) { - assert !isValidatedFilter; - LOG.info("JDO filter pushdown cannot be used: " + queryBuilder.getErrorMessage()); - return null; - } - String jdoFilter = queryBuilder.getFilter(); - LOG.debug("jdoFilter = " + jdoFilter); - return jdoFilter; - } - - private String makeParameterDeclarationString(Map params) { - //Create the parameter declaration string - StringBuilder paramDecl = new StringBuilder(); - for (String key : params.keySet()) { - paramDecl.append(", java.lang.String " + key); - } - return paramDecl.toString(); - } - - private String makeParameterDeclarationStringObj(Map params) { - //Create the parameter declaration string - StringBuilder paramDecl = new StringBuilder(); - for (Entry entry : params.entrySet()) { - paramDecl.append(", "); - paramDecl.append(entry.getValue().getClass().getName()); - paramDecl.append(" "); - paramDecl.append(entry.getKey()); - } - return paramDecl.toString(); - } - - @Override - public List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException { - boolean success = false; - List tableNames = new ArrayList(); - try { - openTransaction(); - LOG.debug("Executing listTableNamesByFilter"); - dbName = dbName.toLowerCase().trim(); - Map params = new HashMap(); - String queryFilterString = makeQueryFilterString(dbName, null, filter, params); - Query query = pm.newQuery(MTable.class); - query.declareImports("import java.lang.String"); - query.setResult("tableName"); - query.setResultClass(java.lang.String.class); - if (maxTables >= 0) { - query.setRange(0, maxTables); - } - LOG.debug("filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString); - for (Entry entry : params.entrySet()) { - LOG.debug("key: " + entry.getKey() + " value: " + entry.getValue() + - " class: " + entry.getValue().getClass().getName()); - } - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setFilter(queryFilterString); - Collection names = (Collection) query.executeWithMap(params); - //have to emulate "distinct", otherwise tables with the same name may be returned - Set tableNamesSet = new HashSet(); - for (Iterator i = names.iterator(); i.hasNext();) { - tableNamesSet.add((String) i.next()); - } - tableNames = new ArrayList(tableNamesSet); - LOG.debug("Done executing query for listTableNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listTableNamesByFilter"); - - } finally { - if (!success) { - rollbackTransaction(); - } - } - return tableNames; - } - - @Override - public List listPartitionNamesByFilter(String dbName, String tableName, - String filter, short maxParts) throws MetaException { - boolean success = false; - List partNames = new ArrayList(); - try { - openTransaction(); - LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = dbName.toLowerCase(); - tableName 
= tableName.toLowerCase(); - - MTable mtable = getMTable(dbName, tableName); - if( mtable == null ) { - // To be consistent with the behavior of listPartitionNames, if the - // table or db does not exist, we return an empty list - return partNames; - } - Map params = new HashMap(); - String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); - Query query = pm.newQuery( - "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where " + queryFilterString); - - if( maxParts >= 0 ) { - //User specified a row limit, set it on the Query - query.setRange(0, maxParts); - } - - LOG.debug("Filter specified is " + filter + "," + - " JDOQL filter is " + queryFilterString); - LOG.debug("Parms is " + params); - - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - query.setResult("partitionName"); - - Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - partNames.add((String) i.next()); - } - - LOG.debug("Done executing query for listMPartitionNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return partNames; - } - - @Override - public void alterTable(String dbname, String name, Table newTable) - throws InvalidObjectException, MetaException { - boolean success = false; - try { - openTransaction(); - name = name.toLowerCase(); - dbname = dbname.toLowerCase(); - MTable newt = convertToMTable(newTable); - if (newt == null) { - throw new InvalidObjectException("new table is invalid"); - } - - MTable oldt = getMTable(dbname, name); - if (oldt == null) { - throw new MetaException("table " + name + " doesn't exist"); - } - - // For now only alter name, owner, paramters, cols, bucketcols are allowed - oldt.setTableName(newt.getTableName().toLowerCase()); - oldt.setParameters(newt.getParameters()); - oldt.setOwner(newt.getOwner()); - // Fully copy over the contents of the new SD into the old SD, - // so we don't create an extra SD in the metastore db that has no references. 
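listTableNamesByFilter and listPartitionNamesByFilter both feed the collected parameter map into Query.declareParameters using the makeParameterDeclaration* helpers shown earlier: one declaration entry per parameter, typed java.lang.String for name-only filters or with the runtime class of the value otherwise. A standalone worked example of the object variant, mirroring the construction above:

import java.util.LinkedHashMap;
import java.util.Map;

// Reproduces the makeParameterDeclarationStringObj construction:
// ", <valueClass> <name>" per entry, exactly as built in the code above.
public class ParamDeclSketch {

  static String declarationFor(Map<String, Object> params) {
    StringBuilder paramDecl = new StringBuilder();
    for (Map.Entry<String, Object> entry : params.entrySet()) {
      paramDecl.append(", ")
               .append(entry.getValue().getClass().getName())
               .append(" ")
               .append(entry.getKey());
    }
    return paramDecl.toString();
  }

  public static void main(String[] args) {
    Map<String, Object> params = new LinkedHashMap<>();
    params.put("t1", "page_view");           // table-name parameter
    params.put("t2", "default");             // database-name parameter
    params.put("p0", 100);                   // e.g. an integer constant from the filter
    System.out.println(declarationFor(params));
    // , java.lang.String t1, java.lang.String t2, java.lang.Integer p0
  }
}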
- copyMSD(newt.getSd(), oldt.getSd()); - oldt.setDatabase(newt.getDatabase()); - oldt.setRetention(newt.getRetention()); - oldt.setPartitionKeys(newt.getPartitionKeys()); - oldt.setTableType(newt.getTableType()); - oldt.setLastAccessTime(newt.getLastAccessTime()); - oldt.setViewOriginalText(newt.getViewOriginalText()); - oldt.setViewExpandedText(newt.getViewExpandedText()); - - // commit the changes - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) - throws InvalidObjectException, MetaException { - boolean success = false; - try { - openTransaction(); - name = name.toLowerCase(); - baseTblName = baseTblName.toLowerCase(); - dbname = dbname.toLowerCase(); - MIndex newi = convertToMIndex(newIndex); - if (newi == null) { - throw new InvalidObjectException("new index is invalid"); - } - - MIndex oldi = getMIndex(dbname, baseTblName, name); - if (oldi == null) { - throw new MetaException("index " + name + " doesn't exist"); - } - - // For now only alter paramters are allowed - oldi.setParameters(newi.getParameters()); - - // commit the changes - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private void alterPartitionNoTxn(String dbname, String name, List part_vals, - Partition newPart) throws InvalidObjectException, MetaException { - name = name.toLowerCase(); - dbname = dbname.toLowerCase(); - MPartition oldp = getMPartition(dbname, name, part_vals); - MPartition newp = convertToMPart(newPart, false); - if (oldp == null || newp == null) { - throw new InvalidObjectException("partition does not exist."); - } - oldp.setValues(newp.getValues()); - oldp.setPartitionName(newp.getPartitionName()); - oldp.setParameters(newPart.getParameters()); - if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) { - copyMSD(newp.getSd(), oldp.getSd()); - } - if (newp.getCreateTime() != oldp.getCreateTime()) { - oldp.setCreateTime(newp.getCreateTime()); - } - if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { - oldp.setLastAccessTime(newp.getLastAccessTime()); - } - } - - @Override - public void alterPartition(String dbname, String name, List part_vals, Partition newPart) - throws InvalidObjectException, MetaException { - boolean success = false; - Exception e = null; - try { - openTransaction(); - alterPartitionNoTxn(dbname, name, part_vals, newPart); - // commit the changes - success = commitTransaction(); - } catch (Exception exception) { - e = exception; - } finally { - if (!success) { - rollbackTransaction(); - MetaException metaException = new MetaException( - "The transaction for alter partition did not commit successfully."); - if (e != null) { - metaException.initCause(e); - } - throw metaException; - } - } - } - - @Override - public void alterPartitions(String dbname, String name, List> part_vals, - List newParts) throws InvalidObjectException, MetaException { - boolean success = false; - Exception e = null; - try { - openTransaction(); - Iterator> part_val_itr = part_vals.iterator(); - for (Partition tmpPart: newParts) { - List tmpPartVals = part_val_itr.next(); - alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart); - } - // commit the changes - success = commitTransaction(); - } catch (Exception exception) { - e = exception; - } finally { - if (!success) { - rollbackTransaction(); - MetaException metaException = new MetaException( - "The transaction for alter 
partition did not commit successfully."); - if (e != null) { - metaException.initCause(e); - } - throw metaException; - } - } - } - - private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) { - oldSd.setLocation(newSd.getLocation()); - MColumnDescriptor oldCD = oldSd.getCD(); - // If the columns of the old column descriptor != the columns of the new one, - // then change the old storage descriptor's column descriptor. - // Convert the MFieldSchema's to their thrift object counterparts, because we maintain - // datastore identity (i.e., identity of the model objects are managed by JDO, - // not the application). - if (!(oldSd != null && oldSd.getCD() != null && - oldSd.getCD().getCols() != null && - newSd != null && newSd.getCD() != null && - newSd.getCD().getCols() != null && - convertToFieldSchemas(newSd.getCD().getCols()). - equals(convertToFieldSchemas(oldSd.getCD().getCols())) - )) { - oldSd.setCD(newSd.getCD()); - } - - //If oldCd does not have any more references, then we should delete it - // from the backend db - removeUnusedColumnDescriptor(oldCD); - oldSd.setBucketCols(newSd.getBucketCols()); - oldSd.setCompressed(newSd.isCompressed()); - oldSd.setInputFormat(newSd.getInputFormat()); - oldSd.setOutputFormat(newSd.getOutputFormat()); - oldSd.setNumBuckets(newSd.getNumBuckets()); - oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName()); - oldSd.getSerDeInfo().setSerializationLib( - newSd.getSerDeInfo().getSerializationLib()); - oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters()); - oldSd.setSkewedColNames(newSd.getSkewedColNames()); - oldSd.setSkewedColValues(newSd.getSkewedColValues()); - oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps()); - oldSd.setSortCols(newSd.getSortCols()); - oldSd.setParameters(newSd.getParameters()); - oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories()); - } - - /** - * Checks if a column descriptor has any remaining references by storage descriptors - * in the db. If it does not, then delete the CD. If it does, then do nothing. - * @param oldCD the column descriptor to delete if it is no longer referenced anywhere - */ - private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) { - if (oldCD == null) { - return; - } - - boolean success = false; - try { - openTransaction(); - LOG.debug("execute removeUnusedColumnDescriptor"); - List referencedSDs = listStorageDescriptorsWithCD(oldCD, 1); - //if no other SD references this CD, we can throw it out. - if (referencedSDs != null && referencedSDs.isEmpty()) { - pm.retrieve(oldCD); - pm.deletePersistent(oldCD); - } - success = commitTransaction(); - LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - /** - * Called right before an action that would drop a storage descriptor. - * This function makes the SD's reference to a CD null, and then deletes the CD - * if it no longer is referenced in the table. - * @param msd the storage descriptor to drop - */ - private void preDropStorageDescriptor(MStorageDescriptor msd) { - if (msd == null || msd.getCD() == null) { - return; - } - - MColumnDescriptor mcd = msd.getCD(); - // Because there is a 1-N relationship between CDs and SDs, - // we must set the SD's CD to null first before dropping the storage descriptor - // to satisfy foriegn key constraints. 
- msd.setCD(null); - removeUnusedColumnDescriptor(mcd); - } - - /** - * Get a list of storage descriptors that reference a particular Column Descriptor - * @param oldCD the column descriptor to get storage descriptors for - * @param maxSDs the maximum number of SDs to return - * @return a list of storage descriptors - */ - private List listStorageDescriptorsWithCD(MColumnDescriptor oldCD, - long maxSDs) { - boolean success = false; - List sds = null; - try { - openTransaction(); - LOG.debug("Executing listStorageDescriptorsWithCD"); - Query query = pm.newQuery(MStorageDescriptor.class, - "this.cd == inCD"); - query.declareParameters("MColumnDescriptor inCD"); - if(maxSDs >= 0) { - //User specified a row limit, set it on the Query - query.setRange(0, maxSDs); - } - sds = (List) query.execute(oldCD); - LOG.debug("Done executing query for listStorageDescriptorsWithCD"); - pm.retrieveAll(sds); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listStorageDescriptorsWithCD"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return sds; - } - - @Override - public boolean addIndex(Index index) throws InvalidObjectException, - MetaException { - boolean commited = false; - try { - openTransaction(); - MIndex idx = convertToMIndex(index); - pm.makePersistent(idx); - commited = commitTransaction(); - return true; - } finally { - if (!commited) { - rollbackTransaction(); - return false; - } - } - } - - private MIndex convertToMIndex(Index index) throws InvalidObjectException, - MetaException { - - StorageDescriptor sd = index.getSd(); - if (sd == null) { - throw new InvalidObjectException("Storage descriptor is not defined for index."); - } - - MStorageDescriptor msd = this.convertToMStorageDescriptor(sd); - MTable origTable = getMTable(index.getDbName(), index.getOrigTableName()); - if (origTable == null) { - throw new InvalidObjectException( - "Original table does not exist for the given index."); - } - - MTable indexTable = getMTable(index.getDbName(), index.getIndexTableName()); - if (indexTable == null) { - throw new InvalidObjectException( - "Underlying index table does not exist for the given index."); - } - - return new MIndex(index.getIndexName().toLowerCase(), origTable, index.getCreateTime(), - index.getLastAccessTime(), index.getParameters(), indexTable, msd, - index.getIndexHandlerClass(), index.isDeferredRebuild()); - } - - @Override - public boolean dropIndex(String dbName, String origTableName, String indexName) - throws MetaException { - boolean success = false; - try { - openTransaction(); - MIndex index = getMIndex(dbName, origTableName, indexName); - if (index != null) { - pm.deletePersistent(index); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - private MIndex getMIndex(String dbName, String originalTblName, String indexName) throws MetaException { - MIndex midx = null; - boolean commited = false; - try { - openTransaction(); - dbName = dbName.toLowerCase().trim(); - originalTblName = originalTblName.toLowerCase().trim(); - MTable mtbl = getMTable(dbName, originalTblName); - if (mtbl == null) { - commited = commitTransaction(); - return null; - } - - Query query = pm.newQuery(MIndex.class, - "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - midx = (MIndex) query.execute(originalTblName, dbName, 
indexName.toLowerCase()); - pm.retrieve(midx); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return midx; - } - - @Override - public Index getIndex(String dbName, String origTableName, String indexName) - throws MetaException { - openTransaction(); - MIndex mIndex = this.getMIndex(dbName, origTableName, indexName); - Index ret = convertToIndex(mIndex); - commitTransaction(); - return ret; - } - - private Index convertToIndex(MIndex mIndex) throws MetaException { - if(mIndex == null) { - return null; - } - - return new Index( - mIndex.getIndexName(), - mIndex.getIndexHandlerClass(), - mIndex.getOrigTable().getDatabase().getName(), - mIndex.getOrigTable().getTableName(), - mIndex.getCreateTime(), - mIndex.getLastAccessTime(), - mIndex.getIndexTable().getTableName(), - this.convertToStorageDescriptor(mIndex.getSd()), - mIndex.getParameters(), - mIndex.getDeferredRebuild()); - - } - - @Override - public List getIndexes(String dbName, String origTableName, int max) - throws MetaException { - boolean success = false; - try { - openTransaction(); - List mIndexList = listMIndexes(dbName, origTableName, max); - List indexes = new ArrayList(mIndexList.size()); - for (MIndex midx : mIndexList) { - indexes.add(this.convertToIndex(midx)); - } - success = commitTransaction(); - return indexes; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private List listMIndexes(String dbName, String origTableName, - int max) { - boolean success = false; - List mindexes = null; - try { - openTransaction(); - LOG.debug("Executing listMIndexes"); - dbName = dbName.toLowerCase().trim(); - origTableName = origTableName.toLowerCase().trim(); - Query query = pm.newQuery(MIndex.class, - "origTable.tableName == t1 && origTable.database.name == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mindexes = (List) query.execute(origTableName, dbName); - LOG.debug("Done executing query for listMIndexes"); - pm.retrieveAll(mindexes); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMIndexes"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mindexes; - } - - @Override - public List listIndexNames(String dbName, String origTableName, - short max) throws MetaException { - List pns = new ArrayList(); - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listIndexNames"); - dbName = dbName.toLowerCase().trim(); - origTableName = origTableName.toLowerCase().trim(); - Query q = pm.newQuery( - "select indexName from org.apache.hadoop.hive.metastore.model.MIndex " - + "where origTable.database.name == t1 && origTable.tableName == t2 " - + "order by indexName asc"); - q.declareParameters("java.lang.String t1, java.lang.String t2"); - q.setResult("indexName"); - Collection names = (Collection) q.execute(dbName, origTableName); - for (Iterator i = names.iterator(); i.hasNext();) { - pns.add((String) i.next()); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return pns; - } - - @Override - public boolean addRole(String roleName, String ownerName) - throws InvalidObjectException, MetaException, NoSuchObjectException { - boolean success = false; - boolean commited = false; - try { - openTransaction(); - MRole nameCheck = this.getMRole(roleName); - if (nameCheck != null) { - throw new InvalidObjectException("Role " + roleName + " already exists."); - } - int now = 
(int)(System.currentTimeMillis()/1000); - MRole mRole = new MRole(roleName, now, - ownerName); - pm.makePersistent(mRole); - commited = commitTransaction(); - success = true; - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return success; - } - - @Override - public boolean grantRole(Role role, String userName, - PrincipalType principalType, String grantor, PrincipalType grantorType, - boolean grantOption) throws MetaException, NoSuchObjectException,InvalidObjectException { - boolean success = false; - boolean commited = false; - try { - openTransaction(); - MRoleMap roleMap = null; - try { - roleMap = this.getMSecurityUserRoleMap(userName, principalType, role - .getRoleName()); - } catch (Exception e) { - } - if (roleMap != null) { - throw new InvalidObjectException("Principal " + userName - + " already has the role " + role.getRoleName()); - } - if (principalType == PrincipalType.ROLE) { - validateRole(userName); - } - MRole mRole = getMRole(role.getRoleName()); - long now = System.currentTimeMillis()/1000; - MRoleMap roleMember = new MRoleMap(userName, principalType.toString(), - mRole, (int) now, grantor, grantorType.toString(), grantOption); - pm.makePersistent(roleMember); - commited = commitTransaction(); - success = true; - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return success; - } - - /** - * Verify that role with given name exists, if not throw exception - * @param roleName - * @throws NoSuchObjectException - */ - private void validateRole(String roleName) throws NoSuchObjectException { - // if grantee is a role, check if it exists - MRole granteeRole = getMRole(roleName); - if (granteeRole == null) { - throw new NoSuchObjectException("Role " + roleName + " does not exist"); - } - } - - @Override - public boolean revokeRole(Role role, String userName, PrincipalType principalType, - boolean grantOption) throws MetaException, NoSuchObjectException { - boolean success = false; - try { - openTransaction(); - MRoleMap roleMember = getMSecurityUserRoleMap(userName, principalType, - role.getRoleName()); - if (grantOption) { - // Revoke with grant option - only remove the grant option but keep the role. - if (roleMember.getGrantOption()) { - roleMember.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with role " + role.getRoleName()); - } - } else { - // No grant option in revoke, remove the whole role. 
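grantRole above rejects a membership that already exists and stamps the grant with the current time in epoch seconds, while revokeRole either clears just the grant option or deletes the membership outright. The sketch below captures those two rules over an in-memory membership list; RoleMap here is a plain value class standing in for MRoleMap, not the JDO model.

import java.util.ArrayList;
import java.util.List;

// In-memory analogue of the grantRole / revokeRole semantics shown above.
public class RoleGrantSketch {

  static class RoleMap {
    final String principal;
    final String role;
    final long createTime;                   // epoch seconds, like the add time recorded above
    boolean grantOption;

    RoleMap(String principal, String role, long createTime, boolean grantOption) {
      this.principal = principal;
      this.role = role;
      this.createTime = createTime;
      this.grantOption = grantOption;
    }
  }

  static final List<RoleMap> memberships = new ArrayList<>();

  static void grant(String principal, String role, boolean grantOption) {
    boolean exists = memberships.stream()
        .anyMatch(m -> m.principal.equals(principal) && m.role.equals(role));
    if (exists) {
      throw new IllegalStateException("Principal " + principal + " already has the role " + role);
    }
    long now = System.currentTimeMillis() / 1000;
    memberships.add(new RoleMap(principal, role, now, grantOption));
  }

  static void revoke(String principal, String role, boolean grantOptionOnly) {
    RoleMap m = memberships.stream()
        .filter(x -> x.principal.equals(principal) && x.role.equals(role))
        .findFirst().orElseThrow();
    if (grantOptionOnly) {
      if (!m.grantOption) {
        throw new IllegalStateException(principal + " does not have grant option with role " + role);
      }
      m.grantOption = false;                 // keep the membership, drop only the grant option
    } else {
      memberships.remove(m);                 // drop the whole membership
    }
  }

  public static void main(String[] args) {
    grant("alice", "analyst", true);
    revoke("alice", "analyst", true);        // revoke-with-grant-option: membership survives
    revoke("alice", "analyst", false);       // plain revoke: membership removed
    System.out.println(memberships.size());  // 0
  }
}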
- pm.deletePersistent(roleMember); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - private MRoleMap getMSecurityUserRoleMap(String userName, - PrincipalType principalType, String roleName) { - MRoleMap mRoleMember = null; - boolean commited = false; - try { - openTransaction(); - Query query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2 && role.roleName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - mRoleMember = (MRoleMap) query.executeWithArray(userName, principalType.toString(), roleName); - pm.retrieve(mRoleMember); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return mRoleMember; - } - - @Override - public boolean removeRole(String roleName) throws MetaException, - NoSuchObjectException { - boolean success = false; - try { - openTransaction(); - MRole mRol = getMRole(roleName); - pm.retrieve(mRol); - if (mRol != null) { - // first remove all the membership, the membership that this role has - // been granted - List roleMap = listRoleMembers(mRol.getRoleName()); - if (roleMap.size() > 0) { - pm.deletePersistentAll(roleMap); - } - List roleMember = listMSecurityPrincipalMembershipRole(mRol - .getRoleName(), PrincipalType.ROLE); - if (roleMember.size() > 0) { - pm.deletePersistentAll(roleMember); - } - // then remove all the grants - List userGrants = listPrincipalGlobalGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (userGrants.size() > 0) { - pm.deletePersistentAll(userGrants); - } - List dbGrants = listPrincipalAllDBGrant(mRol - .getRoleName(), PrincipalType.ROLE); - if (dbGrants.size() > 0) { - pm.deletePersistentAll(dbGrants); - } - List tabPartGrants = listPrincipalAllTableGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (tabPartGrants.size() > 0) { - pm.deletePersistentAll(tabPartGrants); - } - List partGrants = listPrincipalAllPartitionGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (partGrants.size() > 0) { - pm.deletePersistentAll(partGrants); - } - List tblColumnGrants = listPrincipalAllTableColumnGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (tblColumnGrants.size() > 0) { - pm.deletePersistentAll(tblColumnGrants); - } - List partColumnGrants = listPrincipalAllPartitionColumnGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (partColumnGrants.size() > 0) { - pm.deletePersistentAll(partColumnGrants); - } - // finally remove the role - pm.deletePersistent(mRol); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return success; - } - - /** - * Get all the roles in the role hierarchy that this user and groupNames belongs to - * @param userName - * @param groupNames - * @return - */ - private Set listAllRolesInHierarchy(String userName, - List groupNames) { - List ret = new ArrayList(); - if(userName != null) { - ret.addAll(listRoles(userName, PrincipalType.USER)); - } - if (groupNames != null) { - for (String groupName: groupNames) { - ret.addAll(listRoles(groupName, PrincipalType.GROUP)); - } - } - // get names of these roles and its ancestors - Set roleNames = new HashSet(); - getAllRoleAncestors(roleNames, ret); - return roleNames; - } - - /** - * Add role names of parentRoles and its parents to processedRoles - * - * @param processedRoleNames - * @param parentRoles - */ - private void getAllRoleAncestors(Set processedRoleNames, List parentRoles) 
{ - for (MRoleMap parentRole : parentRoles) { - String parentRoleName = parentRole.getRole().getRoleName(); - if (!processedRoleNames.contains(parentRoleName)) { - // unprocessed role: get its parents, add it to processed, and call this - // function recursively - List nextParentRoles = listRoles(parentRoleName, PrincipalType.ROLE); - processedRoleNames.add(parentRoleName); - getAllRoleAncestors(processedRoleNames, nextParentRoles); - } - } - } - - @SuppressWarnings("unchecked") - @Override - public List listRoles(String principalName, - PrincipalType principalType) { - boolean success = false; - List mRoleMember = null; - try { - openTransaction(); - LOG.debug("Executing listRoles"); - Query query = pm - .newQuery( - MRoleMap.class, - "principalName == t1 && principalType == t2"); - query - .declareParameters("java.lang.String t1, java.lang.String t2"); - query.setUnique(false); - mRoleMember = (List) query.executeWithArray( - principalName, principalType.toString()); - LOG.debug("Done executing query for listMSecurityUserRoleMap"); - pm.retrieveAll(mRoleMember); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMSecurityUserRoleMap"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - - if (principalType == PrincipalType.USER) { - // All users belong to public role implicitly, add that role - if (mRoleMember == null) { - mRoleMember = new ArrayList(); - } else { - mRoleMember = new ArrayList(mRoleMember); - } - MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC); - mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, - null, null, false)); - } - return mRoleMember; - - } - - @SuppressWarnings("unchecked") - private List listMSecurityPrincipalMembershipRole(final String roleName, - final PrincipalType principalType) { - boolean success = false; - List mRoleMemebership = null; - try { - openTransaction(); - LOG.debug("Executing listMSecurityPrincipalMembershipRole"); - Query query = pm.newQuery(MRoleMap.class, - "principalName == t1 && principalType == t2"); - query - .declareParameters("java.lang.String t1, java.lang.String t2"); - mRoleMemebership = (List) query.execute(roleName, principalType.toString()); - LOG - .debug("Done executing query for listMSecurityPrincipalMembershipRole"); - pm.retrieveAll(mRoleMemebership); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mRoleMemebership; - } - - @Override - public Role getRole(String roleName) throws NoSuchObjectException { - MRole mRole = this.getMRole(roleName); - if (mRole == null) { - throw new NoSuchObjectException(roleName + " role can not be found."); - } - Role ret = new Role(mRole.getRoleName(), mRole.getCreateTime(), mRole - .getOwnerName()); - return ret; - } - - private MRole getMRole(String roleName) { - MRole mrole = null; - boolean commited = false; - try { - openTransaction(); - Query query = pm.newQuery(MRole.class, "roleName == t1"); - query.declareParameters("java.lang.String t1"); - query.setUnique(true); - mrole = (MRole) query.execute(roleName); - pm.retrieve(mrole); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return mrole; - } - - @Override - public List listRoleNames() { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listAllRoleNames"); - Query query = pm.newQuery("select roleName 
from org.apache.hadoop.hive.metastore.model.MRole"); - query.setResult("roleName"); - Collection names = (Collection) query.execute(); - List roleNames = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - roleNames.add((String) i.next()); - } - success = commitTransaction(); - return roleNames; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, - List groupNames) throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - List user = this.listPrincipalGlobalGrants(userName, PrincipalType.USER); - if(user.size()>0) { - Map> userPriv = new HashMap>(); - List grantInfos = new ArrayList(user.size()); - for (int i = 0; i < user.size(); i++) { - MGlobalPrivilege item = user.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - userPriv.put(userName, grantInfos); - ret.setUserPrivileges(userPriv); - } - } - if (groupNames != null && groupNames.size() > 0) { - Map> groupPriv = new HashMap>(); - for(String groupName: groupNames) { - List group = this.listPrincipalGlobalGrants(groupName, PrincipalType.GROUP); - if(group.size()>0) { - List grantInfos = new ArrayList(group.size()); - for (int i = 0; i < group.size(); i++) { - MGlobalPrivilege item = group.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - groupPriv.put(groupName, grantInfos); - } - } - ret.setGroupPrivileges(groupPriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return ret; - } - - public List getDBPrivilege(String dbName, - String principalName, PrincipalType principalType) - throws InvalidObjectException, MetaException { - dbName = dbName.toLowerCase().trim(); - - if (principalName != null) { - List userNameDbPriv = this.listPrincipalDBGrants( - principalName, principalType, dbName); - if (userNameDbPriv != null && userNameDbPriv.size() > 0) { - List grantInfos = new ArrayList( - userNameDbPriv.size()); - for (int i = 0; i < userNameDbPriv.size(); i++) { - MDBPrivilege item = userNameDbPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return new ArrayList(0); - } - - - @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, - String userName, List groupNames) throws InvalidObjectException, - MetaException { - boolean commited = false; - dbName = dbName.toLowerCase().trim(); - - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - Map> dbUserPriv = new HashMap>(); - dbUserPriv.put(userName, getDBPrivilege(dbName, userName, - PrincipalType.USER)); - ret.setUserPrivileges(dbUserPriv); - } - if (groupNames != null && groupNames.size() > 0) { - Map> dbGroupPriv = new HashMap>(); - for (String groupName : groupNames) { - dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName, - PrincipalType.GROUP)); - } - ret.setGroupPrivileges(dbGroupPriv); - } - Set roleNames = 
listAllRolesInHierarchy(userName, groupNames); - if (roleNames != null && roleNames.size() > 0) { - Map> dbRolePriv = new HashMap>(); - for (String roleName : roleNames) { - dbRolePriv - .put(roleName, getDBPrivilege(dbName, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(dbRolePriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return ret; - } - - @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, - String tableName, String partition, String userName, - List groupNames) throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - try { - openTransaction(); - if (userName != null) { - Map> partUserPriv = new HashMap>(); - partUserPriv.put(userName, getPartitionPrivilege(dbName, - tableName, partition, userName, PrincipalType.USER)); - ret.setUserPrivileges(partUserPriv); - } - if (groupNames != null && groupNames.size() > 0) { - Map> partGroupPriv = new HashMap>(); - for (String groupName : groupNames) { - partGroupPriv.put(groupName, getPartitionPrivilege(dbName, tableName, - partition, groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(partGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (roleNames != null && roleNames.size() > 0) { - Map> partRolePriv = new HashMap>(); - for (String roleName : roleNames) { - partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, - partition, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(partRolePriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return ret; - } - - @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, - String tableName, String userName, List groupNames) - throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - try { - openTransaction(); - if (userName != null) { - Map> tableUserPriv = new HashMap>(); - tableUserPriv.put(userName, getTablePrivilege(dbName, - tableName, userName, PrincipalType.USER)); - ret.setUserPrivileges(tableUserPriv); - } - if (groupNames != null && groupNames.size() > 0) { - Map> tableGroupPriv = new HashMap>(); - for (String groupName : groupNames) { - tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, - groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(tableGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (roleNames != null && roleNames.size() > 0) { - Map> tableRolePriv = new HashMap>(); - for (String roleName : roleNames) { - tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, - roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(tableRolePriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return ret; - } - - @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, - String tableName, String partitionName, String columnName, - String userName, List groupNames) throws InvalidObjectException, - MetaException { - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - columnName = columnName.toLowerCase().trim(); - - boolean commited = 
false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - Map> columnUserPriv = new HashMap>(); - columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, - columnName, partitionName, userName, PrincipalType.USER)); - ret.setUserPrivileges(columnUserPriv); - } - if (groupNames != null && groupNames.size() > 0) { - Map> columnGroupPriv = new HashMap>(); - for (String groupName : groupNames) { - columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, - columnName, partitionName, groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(columnGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (roleNames != null && roleNames.size() > 0) { - Map> columnRolePriv = new HashMap>(); - for (String roleName : roleNames) { - columnRolePriv.put(roleName, getColumnPrivilege(dbName, tableName, - columnName, partitionName, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(columnRolePriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return ret; - } - - private List getPartitionPrivilege(String dbName, - String tableName, String partName, String principalName, - PrincipalType principalType) { - - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - if (principalName != null) { - List userNameTabPartPriv = this - .listPrincipalPartitionGrants(principalName, principalType, - dbName, tableName, partName); - if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( - userNameTabPartPriv.size()); - for (int i = 0; i < userNameTabPartPriv.size(); i++) { - MPartitionPrivilege item = userNameTabPartPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), - getPrincipalTypeFromStr(item.getGrantorType()), item.getGrantOption())); - - } - return grantInfos; - } - } - return new ArrayList(0); - } - - private PrincipalType getPrincipalTypeFromStr(String str) { - return str == null ? 
null : PrincipalType.valueOf(str); - } - - private List getTablePrivilege(String dbName, - String tableName, String principalName, PrincipalType principalType) { - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - if (principalName != null) { - List userNameTabPartPriv = this - .listAllTableGrants(principalName, principalType, - dbName, tableName); - if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( - userNameTabPartPriv.size()); - for (int i = 0; i < userNameTabPartPriv.size(); i++) { - MTablePrivilege item = userNameTabPartPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return new ArrayList(0); - } - - private List getColumnPrivilege(String dbName, - String tableName, String columnName, String partitionName, - String principalName, PrincipalType principalType) { - - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - columnName = columnName.toLowerCase().trim(); - - if (partitionName == null) { - List userNameColumnPriv = this - .listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); - if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( - userNameColumnPriv.size()); - for (int i = 0; i < userNameColumnPriv.size(); i++) { - MTableColumnPrivilege item = userNameColumnPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } else { - List userNameColumnPriv = this - .listPrincipalPartitionColumnGrants(principalName, - principalType, dbName, tableName, partitionName, columnName); - if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( - userNameColumnPriv.size()); - for (int i = 0; i < userNameColumnPriv.size(); i++) { - MPartitionColumnPrivilege item = userNameColumnPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return new ArrayList(0); - } - - @Override - public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, - MetaException, NoSuchObjectException { - boolean committed = false; - int now = (int) (System.currentTimeMillis() / 1000); - try { - openTransaction(); - List persistentObjs = new ArrayList(); - - List privilegeList = privileges.getPrivileges(); - - if (privilegeList != null && privilegeList.size() > 0) { - Iterator privIter = privilegeList.iterator(); - Set privSet = new HashSet(); - while (privIter.hasNext()) { - HiveObjectPrivilege privDef = privIter.next(); - HiveObjectRef hiveObject = privDef.getHiveObject(); - String privilegeStr = privDef.getGrantInfo().getPrivilege(); - String[] privs = privilegeStr.split(","); - String userName = privDef.getPrincipalName(); - PrincipalType principalType = privDef.getPrincipalType(); - String grantor = privDef.getGrantInfo().getGrantor(); - String grantorType = privDef.getGrantInfo().getGrantorType().toString(); - boolean grantOption = privDef.getGrantInfo().isGrantOption(); - privSet.clear(); - - if(principalType == 
PrincipalType.ROLE){ - validateRole(userName); - } - - if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { - List globalPrivs = this - .listPrincipalGlobalGrants(userName, principalType); - if (globalPrivs != null) { - for (MGlobalPrivilege priv : globalPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted by " + grantor); - } - MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege(userName, - principalType.toString(), privilege, now, grantor, grantorType, grantOption); - persistentObjs.add(mGlobalPrivs); - } - } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); - if (dbObj != null) { - List dbPrivs = this.listPrincipalDBGrants( - userName, principalType, hiveObject.getDbName()); - if (dbPrivs != null) { - for (MDBPrivilege priv : dbPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on database " - + hiveObject.getDbName() + " by " + grantor); - } - MDBPrivilege mDb = new MDBPrivilege(userName, principalType - .toString(), dbObj, privilege, now, grantor, grantorType, grantOption); - persistentObjs.add(mDb); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject - .getObjectName()); - if (tblObj != null) { - List tablePrivs = this - .listAllTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); - if (tablePrivs != null) { - for (MTablePrivilege priv : tablePrivs) { - if (priv.getGrantor() != null - && priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on table [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "] by " + grantor); - } - MTablePrivilege mTab = new MTablePrivilege( - userName, principalType.toString(), tblObj, - privilege, now, grantor, grantorType, grantOption); - persistentObjs.add(mTab); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - MPartition partObj = this.getMPartition(hiveObject.getDbName(), - hiveObject.getObjectName(), hiveObject.getPartValues()); - String partName = null; - if (partObj != null) { - partName = partObj.getPartitionName(); - List partPrivs = this - .listPrincipalPartitionGrants(userName, - principalType, hiveObject.getDbName(), hiveObject - .getObjectName(), partObj.getPartitionName()); - if (partPrivs != null) { - for (MPartitionPrivilege priv : partPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on partition [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "," - + partName + "] by " + grantor); - } - MPartitionPrivilege mTab = new MPartitionPrivilege(userName, - principalType.toString(), partObj, privilege, now, grantor, - grantorType, grantOption); - persistentObjs.add(mTab); - } - } - } else if 
(hiveObject.getObjectType() == HiveObjectType.COLUMN) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject - .getObjectName()); - if (tblObj != null) { - if (hiveObject.getPartValues() != null) { - MPartition partObj = null; - List colPrivs = null; - partObj = this.getMPartition(hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getPartValues()); - if (partObj == null) { - continue; - } - colPrivs = this.listPrincipalPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject - .getObjectName(), partObj.getPartitionName(), - hiveObject.getColumnName()); - - if (colPrivs != null) { - for (MPartitionColumnPrivilege priv : colPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on column " - + hiveObject.getColumnName() + " [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "," - + partObj.getPartitionName() + "] by " + grantor); - } - MPartitionColumnPrivilege mCol = new MPartitionColumnPrivilege(userName, - principalType.toString(), partObj, hiveObject - .getColumnName(), privilege, now, grantor, grantorType, - grantOption); - persistentObjs.add(mCol); - } - - } else { - List colPrivs = null; - colPrivs = this.listPrincipalTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getColumnName()); - - if (colPrivs != null) { - for (MTableColumnPrivilege priv : colPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on column " - + hiveObject.getColumnName() + " [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "] by " + grantor); - } - MTableColumnPrivilege mCol = new MTableColumnPrivilege(userName, - principalType.toString(), tblObj, hiveObject - .getColumnName(), privilege, now, grantor, grantorType, - grantOption); - persistentObjs.add(mCol); - } - } - } - } - } - } - if (persistentObjs.size() > 0) { - pm.makePersistentAll(persistentObjs); - } - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - } - } - return committed; - } - - @Override - public boolean revokePrivileges(PrivilegeBag privileges) - throws InvalidObjectException, MetaException, NoSuchObjectException { - boolean committed = false; - try { - openTransaction(); - List persistentObjs = new ArrayList(); - - List privilegeList = privileges.getPrivileges(); - - - if (privilegeList != null && privilegeList.size() > 0) { - Iterator privIter = privilegeList.iterator(); - - while (privIter.hasNext()) { - HiveObjectPrivilege privDef = privIter.next(); - HiveObjectRef hiveObject = privDef.getHiveObject(); - String privilegeStr = privDef.getGrantInfo().getPrivilege(); - if (privilegeStr == null || privilegeStr.trim().equals("")) { - continue; - } - String[] privs = privilegeStr.split(","); - String userName = privDef.getPrincipalName(); - PrincipalType principalType = privDef.getPrincipalType(); - - if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { - List mSecUser = this.listPrincipalGlobalGrants( - userName, principalType); - boolean found = false; - if (mSecUser != null) { - for (String privilege : privs) { - for (MGlobalPrivilege userGrant : mSecUser) 
{ - String userGrantPrivs = userGrant.getPrivilege(); - if (privilege.equals(userGrantPrivs)) { - found = true; - persistentObjs.add(userGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException( - "No user grant found for privileges " + privilege); - } - } - } - - } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); - if (dbObj != null) { - String db = hiveObject.getDbName(); - boolean found = false; - List dbGrants = this.listPrincipalDBGrants( - userName, principalType, db); - for (String privilege : privs) { - for (MDBPrivilege dbGrant : dbGrants) { - String dbGrantPriv = dbGrant.getPrivilege(); - if (privilege.equals(dbGrantPriv)) { - found = true; - persistentObjs.add(dbGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException( - "No database grant found for privileges " + privilege - + " on database " + db); - } - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - boolean found = false; - List tableGrants = this - .listAllTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); - for (String privilege : privs) { - for (MTablePrivilege tabGrant : tableGrants) { - String tableGrantPriv = tabGrant.getPrivilege(); - if (privilege.equalsIgnoreCase(tableGrantPriv)) { - found = true; - persistentObjs.add(tabGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + hiveObject.getObjectName() - + ", database is " + hiveObject.getDbName()); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - - boolean found = false; - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject.getObjectName()); - String partName = null; - if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); - } - List partitionGrants = this - .listPrincipalPartitionGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), partName); - for (String privilege : privs) { - for (MPartitionPrivilege partGrant : partitionGrants) { - String partPriv = partGrant.getPrivilege(); - if (partPriv.equalsIgnoreCase(privilege)) { - found = true; - persistentObjs.add(partGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", partition is " + partName + ", database is " + tabObj.getDbName()); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject - .getObjectName()); - String partName = null; - if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), - hiveObject.getPartValues()); - } - - if (partName != null) { - List mSecCol = listPrincipalPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject - .getObjectName(), partName, hiveObject.getColumnName()); - boolean found = false; - if (mSecCol != null) { - for (String privilege : privs) { - for (MPartitionColumnPrivilege col : mSecCol) { - String colPriv = col.getPrivilege(); - if (colPriv.equalsIgnoreCase(privilege)) { - found = true; - persistentObjs.add(col); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", partition is " + partName + ", 
column name = " - + hiveObject.getColumnName() + ", database is " - + tabObj.getDbName()); - } - } - } - } else { - List mSecCol = listPrincipalTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getColumnName()); - boolean found = false; - if (mSecCol != null) { - for (String privilege : privs) { - for (MTableColumnPrivilege col : mSecCol) { - String colPriv = col.getPrivilege(); - if (colPriv.equalsIgnoreCase(privilege)) { - found = true; - persistentObjs.add(col); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", column name = " - + hiveObject.getColumnName() + ", database is " - + tabObj.getDbName()); - } - } - } - } - - } - } - } - - if (persistentObjs.size() > 0) { - pm.deletePersistentAll(persistentObjs); - } - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - } - } - return committed; - } - - @SuppressWarnings("unchecked") - @Override - public List listRoleMembers(String roleName) { - boolean success = false; - List mRoleMemeberList = null; - try { - openTransaction(); - LOG.debug("Executing listMSecurityUserRoleMember"); - Query query = pm.newQuery(MRoleMap.class, - "role.roleName == t1"); - query.declareParameters("java.lang.String t1"); - query.setUnique(false); - mRoleMemeberList = (List) query.execute( - roleName); - LOG.debug("Done executing query for listMSecurityUserRoleMember"); - pm.retrieveAll(mRoleMemeberList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMSecurityUserRoleMember"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mRoleMemeberList; - } - - @SuppressWarnings("unchecked") - @Override - public List listPrincipalGlobalGrants(String principalName, PrincipalType principalType) { - boolean commited = false; - List userNameDbPriv = null; - try { - openTransaction(); - if (principalName != null) { - Query query = pm.newQuery(MGlobalPrivilege.class, - "principalName == t1 && principalType == t2 "); - query.declareParameters( - "java.lang.String t1, java.lang.String t2"); - userNameDbPriv = (List) query - .executeWithArray(principalName, principalType.toString()); - pm.retrieveAll(userNameDbPriv); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return userNameDbPriv; - } - - @Override - public List listGlobalGrantsAll() { - boolean commited = false; - try { - openTransaction(); - Query query = pm.newQuery(MGlobalPrivilege.class); - List userNameDbPriv = (List) query.execute(); - pm.retrieveAll(userNameDbPriv); - commited = commitTransaction(); - return convertGlobal(userNameDbPriv); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - } - - private List convertGlobal(List privs) { - List result = new ArrayList(); - for (MGlobalPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - @Override - public List listPrincipalDBGrants(String 
principalName, - PrincipalType principalType, String dbName) { - boolean success = false; - List mSecurityDBList = null; - dbName = dbName.toLowerCase().trim(); - - try { - openTransaction(); - LOG.debug("Executing listPrincipalDBGrants"); - Query query = pm.newQuery(MDBPrivilege.class, - "principalName == t1 && principalType == t2 && database.name == t3"); - query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - mSecurityDBList = (List) query.executeWithArray(principalName, principalType.toString(), dbName); - LOG.debug("Done executing query for listPrincipalDBGrants"); - pm.retrieveAll(mSecurityDBList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalDBGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityDBList; - } - - @Override - public List listPrincipalDBGrantsAll( - String principalName, PrincipalType principalType) { - return convertDB(listPrincipalAllDBGrant(principalName, principalType)); - } - - @Override - public List listDBGrantsAll(String dbName) { - return convertDB(listDatabaseGrants(dbName)); - } - - private List convertDB(List privs) { - List result = new ArrayList(); - for (MDBPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - String database = priv.getDatabase().getName(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATABASE, database, - null, null, null); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - private List listPrincipalAllDBGrant( - String principalName, PrincipalType principalType) { - boolean success = false; - List mSecurityDBList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllDBGrant"); - if (principalName != null && principalType != null) { - Query query = pm.newQuery(MDBPrivilege.class, - "principalName == t1 && principalType == t2"); - query - .declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityDBList = (List) query.execute(principalName, principalType.toString()); - } else { - Query query = pm.newQuery(MDBPrivilege.class); - mSecurityDBList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalAllDBGrant"); - pm.retrieveAll(mSecurityDBList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityDBList; - } - - @SuppressWarnings("unchecked") - public List listAllTableGrants(String dbName, - String tableName) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - List mSecurityTabList = null; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - try { - openTransaction(); - LOG.debug("Executing listAllTableGrants"); - String queryStr = "table.tableName == t1 && table.database.name == t2"; - Query query = pm.newQuery( - MTablePrivilege.class, queryStr); - query.declareParameters( - "java.lang.String t1, java.lang.String t2"); - mSecurityTabList = (List) query - .executeWithArray(tableName, dbName); - LOG.debug("Done executing query for 
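// Every list*Grants method above and below follows the same parameterized
// JDOQL pattern: build a filter over the persisted privilege class, declare
// typed parameters, bind them with executeWithArray, and call retrieveAll
// before the transaction ends so the detached rows remain readable. Sketch
// with a hypothetical persistence-capable class (GrantRow stands in for
// MDBPrivilege); only the JDO calls mirror the code above.
import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

final class JdoqlListSketch {
  @SuppressWarnings("unchecked")
  static List<GrantRow> listDbGrants(PersistenceManager pm, String principalName,
      String principalType, String dbName) {
    Query query = pm.newQuery(GrantRow.class,
        "principalName == t1 && principalType == t2 && database.name == t3");
    query.declareParameters(
        "java.lang.String t1, java.lang.String t2, java.lang.String t3");
    List<GrantRow> rows = (List<GrantRow>) query.executeWithArray(
        principalName, principalType, dbName.toLowerCase().trim());
    pm.retrieveAll(rows);   // force-load fields before the objects detach
    return rows;
  }

  /** Hypothetical persistence-capable row. */
  static class GrantRow { String principalName; String principalType; DbRef database; }
  static class DbRef { String name; }
}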
listAllTableGrants"); - pm.retrieveAll(mSecurityTabList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listAllTableGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabList; - } - - @SuppressWarnings("unchecked") - public List listTableAllPartitionGrants(String dbName, - String tableName) { - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - boolean success = false; - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listTableAllPartitionGrants"); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; - Query query = pm.newQuery( - MPartitionPrivilege.class, queryStr); - query.declareParameters( - "java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) query - .executeWithArray(tableName, dbName); - LOG.debug("Done executing query for listTableAllPartitionGrants"); - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listTableAllPartitionGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - @SuppressWarnings("unchecked") - public List listTableAllColumnGrants(String dbName, - String tableName) { - boolean success = false; - List mTblColPrivilegeList = null; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - try { - openTransaction(); - LOG.debug("Executing listTableAllColumnGrants"); - String queryStr = "table.tableName == t1 && table.database.name == t2"; - Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mTblColPrivilegeList = (List) query - .executeWithArray(tableName, dbName); - LOG.debug("Done executing query for listTableAllColumnGrants"); - pm.retrieveAll(mTblColPrivilegeList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listTableAllColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mTblColPrivilegeList; - } - - @SuppressWarnings("unchecked") - public List listTableAllPartitionColumnGrants(String dbName, - String tableName) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - List mSecurityColList = null; - try { - openTransaction(); - LOG.debug("Executing listTableAllPartitionColumnGrants"); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; - Query query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityColList = (List) query - .executeWithArray(tableName, dbName); - LOG.debug("Done executing query for listTableAllPartitionColumnGrants"); - pm.retrieveAll(mSecurityColList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColList; - } - - @SuppressWarnings("unchecked") - public List listPartitionAllColumnGrants(String dbName, - String tableName, List partNames) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - List mSecurityColList = null; - try { - openTransaction(); - LOG.debug("Executing listPartitionAllColumnGrants"); - mSecurityColList 
= queryByPartitionNames( - dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); - LOG.debug("Done executing query for listPartitionAllColumnGrants"); - pm.retrieveAll(mSecurityColList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPartitionAllColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColList; - } - - public void dropPartitionAllColumnGrantsNoTxn( - String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( - dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); - queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); - } - - @SuppressWarnings("unchecked") - private List listDatabaseGrants(String dbName) { - dbName = dbName.toLowerCase().trim(); - - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listDatabaseGrants"); - Query query = pm.newQuery(MDBPrivilege.class, - "database.name == t1"); - query.declareParameters("java.lang.String t1"); - List mSecurityDBList = (List) query - .executeWithArray(dbName); - LOG.debug("Done executing query for listDatabaseGrants"); - pm.retrieveAll(mSecurityDBList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listDatabaseGrants"); - return mSecurityDBList; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @SuppressWarnings("unchecked") - private List listPartitionGrants(String dbName, String tableName, - List partNames) { - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - boolean success = false; - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listPartitionGrants"); - mSecurityTabPartList = queryByPartitionNames( - dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); - LOG.debug("Done executing query for listPartitionGrants"); - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPartitionGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( - dbName, tableName, partNames,MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); - queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); - } - - @SuppressWarnings("unchecked") - private List queryByPartitionNames(String dbName, String tableName, - List partNames, Class clazz, String tbCol, String dbCol, String partCol) { - ObjectPair queryAndParams = makeQueryByPartitionNames( - dbName, tableName, partNames, clazz, tbCol, dbCol, partCol); - return (List)queryAndParams.getFirst().executeWithArray(queryAndParams.getSecond()); - } - - private ObjectPair makeQueryByPartitionNames( - String dbName, String tableName, List partNames, Class clazz, - String tbCol, String dbCol, String partCol) { - String queryStr = tbCol + " == t1 && " + dbCol + " == t2"; - String paramStr = "java.lang.String t1, java.lang.String t2"; - Object[] params = new Object[2 + 
partNames.size()]; - params[0] = tableName; - params[1] = dbName; - int index = 0; - for (String partName : partNames) { - params[index + 2] = partName; - queryStr += ((index == 0) ? " && (" : " || ") + partCol + " == p" + index; - paramStr += ", java.lang.String p" + index; - ++index; - } - queryStr += ")"; - Query query = pm.newQuery(clazz, queryStr); - query.declareParameters(paramStr); - return new ObjectPair(query, params); - } - - @Override - @SuppressWarnings("unchecked") - public List listAllTableGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName) { - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - boolean success = false; - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listAllTableGrants"); - Query query = pm.newQuery( - MTablePrivilege.class, - "principalName == t1 && principalType == t2 && table.tableName == t3 && table.database.name == t4"); - query.declareParameters( - "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); - mSecurityTabPartList = (List) query - .executeWithArray(principalName, principalType.toString(), tableName, dbName); - LOG.debug("Done executing query for listAllTableGrants"); - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listAllTableGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - @SuppressWarnings("unchecked") - @Override - public List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String partName) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listMSecurityPrincipalPartitionGrant"); - Query query = pm.newQuery( - MPartitionPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + - "&& partition.table.database.name == t4 && partition.partitionName == t5"); - query.declareParameters( - "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " + - "java.lang.String t5"); - mSecurityTabPartList = (List) query - .executeWithArray(principalName, principalType.toString(), tableName, dbName, partName); - LOG.debug("Done executing query for listMSecurityPrincipalPartitionGrant"); - - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionGrant"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - @SuppressWarnings("unchecked") - @Override - public List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String columnName) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - columnName = columnName.toLowerCase().trim(); - List mSecurityColList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalTableColumnGrants"); - String queryStr = "principalName == t1 && principalType == t2 && " + - "table.tableName == t3 && table.database.name == t4 && columnName == t5 "; - Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query - .declareParameters("java.lang.String t1, 
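// makeQueryByPartitionNames above turns a list of partition names into a single
// JDOQL filter of the form
//   <tblCol> == t1 && <dbCol> == t2 && (<partCol> == p0 || <partCol> == p1 || ...)
// plus the matching parameter declaration and value array, so one query (or one
// deletePersistentAll) can cover a whole batch of partitions. A stand-alone
// reconstruction of just that string/array building (assumes a non-empty
// partNames list, as the callers above provide):
import java.util.Arrays;
import java.util.List;

final class PartitionFilterSketch {
  static Object[] build(String dbName, String tableName, List<String> partNames) {
    StringBuilder filter = new StringBuilder(
        "partition.table.tableName == t1 && partition.table.database.name == t2");
    StringBuilder decl = new StringBuilder("java.lang.String t1, java.lang.String t2");
    Object[] params = new Object[2 + partNames.size()];
    params[0] = tableName;
    params[1] = dbName;
    for (int i = 0; i < partNames.size(); i++) {
      params[i + 2] = partNames.get(i);
      filter.append(i == 0 ? " && (" : " || ")
            .append("partition.partitionName == p").append(i);
      decl.append(", java.lang.String p").append(i);
    }
    filter.append(")");
    // The real code hands these to pm.newQuery(clazz, filter), declareParameters(decl),
    // then executeWithArray(params) or deletePersistentAll(params).
    return new Object[] { filter.toString(), decl.toString(), params };
  }

  public static void main(String[] args) {
    Object[] built = build("db1", "tbl1", Arrays.asList("ds=1", "ds=2"));
    System.out.println(built[0]);
    // ... == t1 && ... == t2 && (partition.partitionName == p0 || partition.partitionName == p1)
  }
}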
java.lang.String t2, java.lang.String t3, " + - "java.lang.String t4, java.lang.String t5"); - mSecurityColList = (List) query.executeWithArray( - principalName, principalType.toString(), tableName, dbName, columnName); - LOG.debug("Done executing query for listPrincipalTableColumnGrants"); - pm.retrieveAll(mSecurityColList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listPrincipalTableColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColList; - } - - @Override - @SuppressWarnings("unchecked") - public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String partitionName, String columnName) { - boolean success = false; - tableName = tableName.toLowerCase().trim(); - dbName = dbName.toLowerCase().trim(); - columnName = columnName.toLowerCase().trim(); - - List mSecurityColList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionColumnGrants"); - Query query = pm - .newQuery( - MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + - "&& partition.table.database.name == t4 && partition.partitionName == t5 && columnName == t6"); - query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + - "java.lang.String t4, java.lang.String t5, java.lang.String t6"); - - mSecurityColList = (List) query - .executeWithArray(principalName, principalType.toString(), tableName, - dbName, partitionName, columnName); - LOG.debug("Done executing query for listPrincipalPartitionColumnGrants"); - pm.retrieveAll(mSecurityColList); - - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listPrincipalPartitionColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColList; - } - - @Override - public List listPrincipalPartitionColumnGrantsAll( - String principalName, PrincipalType principalType) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionColumnGrantsAll"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - Query query = pm.newQuery(MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) - query.executeWithArray(principalName, principalType.toString()); - } else { - Query query = pm.newQuery(MPartitionColumnPrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalPartitionColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public List listPartitionColumnGrantsAll( - String dbName, String tableName, String partitionName, String columnName) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPartitionColumnGrantsAll"); - Query query = pm.newQuery(MPartitionColumnPrivilege.class, - "partition.table.tableName == t3 && partition.table.database.name == t4 && " + - "partition.partitionName == t5 && columnName == t6"); - query.declareParameters( - "java.lang.String t3, 
java.lang.String t4, java.lang.String t5, java.lang.String t6"); - List mSecurityTabPartList = (List) - query.executeWithArray(tableName, dbName, partitionName, columnName); - LOG.debug("Done executing query for listPartitionColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPartitionColumnGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private List convertPartCols(List privs) { - List result = new ArrayList(); - for (MPartitionColumnPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MPartition mpartition = priv.getPartition(); - MTable mtable = mpartition.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, - mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), priv.getColumnName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - private List listPrincipalAllTableGrants( - String principalName, PrincipalType principalType) { - boolean success = false; - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllTableGrants"); - Query query = pm.newQuery(MTablePrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) query.execute( - principalName, principalType.toString()); - LOG - .debug("Done executing query for listPrincipalAllTableGrants"); - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listPrincipalAllTableGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - @Override - public List listPrincipalTableGrantsAll( - String principalName, PrincipalType principalType) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllTableGrants"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - Query query = pm.newQuery(MTablePrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) query.execute( - principalName, principalType.toString()); - } else { - Query query = pm.newQuery(MTablePrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalAllTableGrants"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTable(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public List listTableGrantsAll(String dbName, String tableName) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listTableGrantsAll"); - Query query = pm.newQuery(MTablePrivilege.class, - "table.tableName == t1 && table.database.name 
== t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - List mSecurityTabPartList = (List) - query.executeWithArray(tableName, dbName); - LOG.debug("Done executing query for listTableGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTable(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private List convertTable(List privs) { - List result = new ArrayList(); - for (MTablePrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - String table = priv.getTable().getTableName(); - String database = priv.getTable().getDatabase().getName(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.TABLE, database, table, - null, null); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - private List listPrincipalAllPartitionGrants( - String principalName, PrincipalType principalType) { - boolean success = false; - List mSecurityTabPartList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllPartitionGrants"); - Query query = pm.newQuery(MPartitionPrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) query.execute( - principalName, principalType.toString()); - LOG - .debug("Done executing query for listPrincipalAllPartitionGrants"); - pm.retrieveAll(mSecurityTabPartList); - success = commitTransaction(); - LOG - .debug("Done retrieving all objects for listPrincipalAllPartitionGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityTabPartList; - } - - @Override - public List listPrincipalPartitionGrantsAll( - String principalName, PrincipalType principalType) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionGrantsAll"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - Query query = pm.newQuery(MPartitionPrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) - query.execute(principalName, principalType.toString()); - } else { - Query query = pm.newQuery(MPartitionPrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartition(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public List listPartitionGrantsAll( - String dbName, String tableName, String partitionName) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionGrantsAll"); - Query query = pm.newQuery(MPartitionPrivilege.class, - "partition.table.tableName == t3 && partition.table.database.name == t4 && " 
+ - "partition.partitionName == t5"); - query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5"); - List mSecurityTabPartList = (List) - query.executeWithArray(tableName, dbName, partitionName); - LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartition(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private List convertPartition(List privs) { - List result = new ArrayList(); - for (MPartitionPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MPartition mpartition = priv.getPartition(); - MTable mtable = mpartition.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.PARTITION, - mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), null); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - private List listPrincipalAllTableColumnGrants( - String principalName, PrincipalType principalType) { - boolean success = false; - List mSecurityColumnList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllTableColumnGrants"); - Query query = pm.newQuery(MTableColumnPrivilege.class, - "principalName == t1 && principalType == t2"); - query - .declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityColumnList = (List) query.execute( - principalName, principalType.toString()); - LOG.debug("Done executing query for listPrincipalAllTableColumnGrants"); - pm.retrieveAll(mSecurityColumnList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColumnList; - } - - @Override - public List listPrincipalTableColumnGrantsAll( - String principalName, PrincipalType principalType) { - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing listPrincipalTableColumnGrantsAll"); - - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - Query query = pm.newQuery(MTableColumnPrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = (List) - query.execute(principalName, principalType.toString()); - } else { - Query query = pm.newQuery(MTableColumnPrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTableCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public List listTableColumnGrantsAll( - String dbName, String tableName, String columnName) { - boolean success = false; - try { - openTransaction(); - 
LOG.debug("Executing listPrincipalTableColumnGrantsAll"); - Query query = pm.newQuery(MTableColumnPrivilege.class, - "table.tableName == t3 && table.database.name == t4 && columnName == t5"); - query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5"); - List mSecurityTabPartList = (List) - query.executeWithArray(tableName, dbName, columnName); - LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTableCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); - return result; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private List convertTableCols(List privs) { - List result = new ArrayList(); - for (MTableColumnPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MTable mtable = priv.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, - mdatabase.getName(), mtable.getTableName(), null, priv.getColumnName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); - } - return result; - } - - @SuppressWarnings("unchecked") - private List listPrincipalAllPartitionColumnGrants( - String principalName, PrincipalType principalType) { - boolean success = false; - List mSecurityColumnList = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllTableColumnGrants"); - Query query = pm.newQuery(MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2"); - query - .declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityColumnList = (List) query.execute( - principalName, principalType.toString()); - LOG.debug("Done executing query for listPrincipalAllTableColumnGrants"); - pm.retrieveAll(mSecurityColumnList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); - } finally { - if (!success) { - rollbackTransaction(); - } - } - return mSecurityColumnList; - } - - @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, - Map partName, PartitionEventType evtType) throws UnknownTableException, - MetaException, InvalidPartitionException, UnknownPartitionException { - - Collection partEvents; - boolean success = false; - LOG.debug("Begin Executing isPartitionMarkedForEvent"); - try{ - openTransaction(); - Query query = pm.newQuery(MPartitionEvent.class, "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4"); - Table tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid. - if(null == tbl) { - throw new UnknownTableException("Table: "+ tblName + " is not found."); - } - partEvents = (Collection) query.executeWithArray(dbName, tblName, getPartitionStr(tbl, partName), evtType.getValue()); - pm.retrieveAll(partEvents); - success = commitTransaction(); - LOG.debug("Done executing isPartitionMarkedForEvent"); - } finally{ - if (!success) { - rollbackTransaction(); - } - } - return (partEvents != null && !partEvents.isEmpty()) ? 
true : false; - - } - - @Override - public Table markPartitionForEvent(String dbName, String tblName, Map partName, - PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - - LOG.debug("Begin executing markPartitionForEvent"); - boolean success = false; - Table tbl = null; - try{ - openTransaction(); - tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid. - if(null == tbl) { - throw new UnknownTableException("Table: "+ tblName + " is not found."); - } - pm.makePersistent(new MPartitionEvent(dbName,tblName,getPartitionStr(tbl, partName), evtType.getValue())); - success = commitTransaction(); - LOG.debug("Done executing markPartitionForEvent"); - } finally { - if(!success) { - rollbackTransaction(); - } - } - return tbl; - } - - private String getPartitionStr(Table tbl, Map partName) throws InvalidPartitionException{ - if(tbl.getPartitionKeysSize() != partName.size()){ - throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() + - " doesn't match with number of supplied partition values: "+partName.size()); - } - final List storedVals = new ArrayList(tbl.getPartitionKeysSize()); - for(FieldSchema partKey : tbl.getPartitionKeys()){ - String partVal = partName.get(partKey.getName()); - if(null == partVal) { - throw new InvalidPartitionException("No value found for partition column: "+partKey.getName()); - } - storedVals.add(partVal); - } - return join(storedVals,','); - } - - /** The following API - * - * - executeJDOQLSelect - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. - * - */ - public Collection executeJDOQLSelect(String query) { - boolean committed = false; - Collection result = null; - - try { - openTransaction(); - Query q = pm.newQuery(query); - result = (Collection) q.execute(); - committed = commitTransaction(); - if (committed) { - return result; - } else { - return null; - } - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - /** The following API - * - * - executeJDOQLUpdate - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. - * - */ - public long executeJDOQLUpdate(String query) { - boolean committed = false; - long numUpdated = 0; - - try { - openTransaction(); - Query q = pm.newQuery(query); - numUpdated = (Long) q.execute(); - committed = commitTransaction(); - if (committed) { - return numUpdated; - } else { - return -1; - } - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - /** The following API - * - * - listFSRoots - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
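// executeJDOQLSelect / executeJDOQLUpdate above give HiveMetaTool a way to run
// an arbitrary single-string JDOQL statement inside the usual transaction
// wrapper. A trimmed sketch of the select side (the example query string in
// the trailing comment is only plausible single-string JDOQL, not a command
// HiveMetaTool necessarily issues):
import java.util.Collection;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

final class AdHocJdoqlSketch {
  static Collection<?> select(PersistenceManager pm, String jdoql) {
    boolean committed = false;
    try {
      pm.currentTransaction().begin();
      Query q = pm.newQuery(jdoql);                       // single-string JDOQL
      Collection<?> result = (Collection<?>) q.execute();
      pm.currentTransaction().commit();
      committed = true;
      return result;
    } finally {
      if (!committed && pm.currentTransaction().isActive()) {
        pm.currentTransaction().rollback();
      }
    }
  }
  // e.g. select(pm, "SELECT FROM org.apache.hadoop.hive.metastore.model.MDatabase");
}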
- * - */ - public Set listFSRoots() { - boolean committed = false; - Set fsRoots = new HashSet(); - - try { - openTransaction(); - Query query = pm.newQuery(MDatabase.class); - List mDBs = (List) query.execute(); - pm.retrieveAll(mDBs); - - for (MDatabase mDB:mDBs) { - fsRoots.add(mDB.getLocationUri()); - } - committed = commitTransaction(); - if (committed) { - return fsRoots; - } else { - return null; - } - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - private boolean shouldUpdateURI(URI onDiskUri, URI inputUri) { - String onDiskHost = onDiskUri.getHost(); - String inputHost = inputUri.getHost(); - - int onDiskPort = onDiskUri.getPort(); - int inputPort = inputUri.getPort(); - - String onDiskScheme = onDiskUri.getScheme(); - String inputScheme = inputUri.getScheme(); - - //compare ports - if (inputPort != -1) { - if (inputPort != onDiskPort) { - return false; - } - } - //compare schemes - if (inputScheme != null) { - if (onDiskScheme == null) { - return false; - } - if (!inputScheme.equalsIgnoreCase(onDiskScheme)) { - return false; - } - } - //compare hosts - if (onDiskHost != null) { - if (!inputHost.equalsIgnoreCase(onDiskHost)) { - return false; - } - } else { - return false; - } - return true; - } - - public class UpdateMDatabaseURIRetVal { - private List badRecords; - private Map updateLocations; - - UpdateMDatabaseURIRetVal(List badRecords, Map updateLocations) { - this.badRecords = badRecords; - this.updateLocations = updateLocations; - } - - public List getBadRecords() { - return badRecords; - } - - public void setBadRecords(List badRecords) { - this.badRecords = badRecords; - } - - public Map getUpdateLocations() { - return updateLocations; - } - - public void setUpdateLocations(Map updateLocations) { - this.updateLocations = updateLocations; - } - } - - /** The following APIs - * - * - updateMDatabaseURI - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
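// shouldUpdateURI above decides whether a stored location should be rewritten:
// hosts must match case-insensitively, and the scheme and port are compared
// only when the input URI actually specifies them. Illustrative cases (host
// names here are examples only):
//   on disk hdfs://nn1:8020/warehouse  vs. input hdfs://nn1:8020 -> update
//   on disk hdfs://NN1:8020/warehouse  vs. input hdfs://nn1      -> update (input port unset)
//   on disk hdfs://nn1:8020/warehouse  vs. input hdfs://nn2:8020 -> no update (host differs)
//   on disk s3a://nn1/warehouse        vs. input hdfs://nn1      -> no update (scheme differs)
import java.net.URI;

final class UriMatchSketch {
  // Mirrors the comparison order of shouldUpdateURI: port, then scheme, then host.
  static boolean matches(URI onDisk, URI input) {
    if (input.getPort() != -1 && input.getPort() != onDisk.getPort()) {
      return false;
    }
    if (input.getScheme() != null
        && (onDisk.getScheme() == null || !input.getScheme().equalsIgnoreCase(onDisk.getScheme()))) {
      return false;
    }
    return onDisk.getHost() != null && input.getHost().equalsIgnoreCase(onDisk.getHost());
  }

  public static void main(String[] args) throws Exception {
    System.out.println(matches(new URI("hdfs://NN1:8020/warehouse"), new URI("hdfs://nn1"))); // true
    System.out.println(matches(new URI("s3a://nn1/warehouse"), new URI("hdfs://nn1")));       // false
  }
}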
- * - */ - public UpdateMDatabaseURIRetVal updateMDatabaseURI(URI oldLoc, URI newLoc, boolean dryRun) { - boolean committed = false; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); - UpdateMDatabaseURIRetVal retVal = null; - - try { - openTransaction(); - Query query = pm.newQuery(MDatabase.class); - List mDBs = (List) query.execute(); - pm.retrieveAll(mDBs); - - for(MDatabase mDB:mDBs) { - URI locationURI = null; - String location = mDB.getLocationUri(); - try { - locationURI = new URI(location); - } catch(URISyntaxException e) { - badRecords.add(location); - } catch (NullPointerException e) { - badRecords.add(location); - } - if (locationURI == null) { - badRecords.add(location); - } else { - if (shouldUpdateURI(locationURI, oldLoc)) { - String dbLoc = mDB.getLocationUri().replaceAll(oldLoc.toString(), newLoc.toString()); - updateLocations.put(locationURI.toString(), dbLoc); - if (!dryRun) { - mDB.setLocationUri(dbLoc); - } - } - } - } - committed = commitTransaction(); - if (committed) { - retVal = new UpdateMDatabaseURIRetVal(badRecords, updateLocations); - } - return retVal; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - public class UpdateMStorageDescriptorTblPropURIRetVal { - private List badRecords; - private Map updateLocations; - - UpdateMStorageDescriptorTblPropURIRetVal(List badRecords, - Map updateLocations) { - this.badRecords = badRecords; - this.updateLocations = updateLocations; - } - - public List getBadRecords() { - return badRecords; - } - - public void setBadRecords(List badRecords) { - this.badRecords = badRecords; - } - - public Map getUpdateLocations() { - return updateLocations; - } - - public void setUpdateLocations(Map updateLocations) { - this.updateLocations = updateLocations; - } - } - - /** The following APIs - * - * - updateMStorageDescriptorTblPropURI - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
- * - */ - public UpdateMStorageDescriptorTblPropURIRetVal updateMStorageDescriptorTblPropURI(URI oldLoc, - URI newLoc, String tblPropKey, boolean isDryRun) { - boolean committed = false; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); - UpdateMStorageDescriptorTblPropURIRetVal retVal = null; - - try { - openTransaction(); - Query query = pm.newQuery(MStorageDescriptor.class); - List mSDSs = (List) query.execute(); - pm.retrieveAll(mSDSs); - - for(MStorageDescriptor mSDS:mSDSs) { - URI tablePropLocationURI = null; - if (mSDS.getParameters().containsKey(tblPropKey)) { - String tablePropLocation = mSDS.getParameters().get(tblPropKey); - try { - tablePropLocationURI = new URI(tablePropLocation); - } catch (URISyntaxException e) { - badRecords.add(tablePropLocation); - } catch (NullPointerException e) { - badRecords.add(tablePropLocation); - } - // if tablePropKey that was passed in lead to a valid URI resolution, update it if - //parts of it match the old-NN-loc, else add to badRecords - if (tablePropLocationURI == null) { - badRecords.add(tablePropLocation); - } else { - if (shouldUpdateURI(tablePropLocationURI, oldLoc)) { - String tblPropLoc = mSDS.getParameters().get(tblPropKey).replaceAll(oldLoc.toString(), - newLoc.toString()); - updateLocations.put(tablePropLocationURI.toString(), tblPropLoc); - if (!isDryRun) { - mSDS.getParameters().put(tblPropKey, tblPropLoc); - } - } - } - } - } - committed = commitTransaction(); - if (committed) { - retVal = new UpdateMStorageDescriptorTblPropURIRetVal(badRecords, updateLocations); - } - return retVal; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - public class UpdateMStorageDescriptorTblURIRetVal { - private List badRecords; - private Map updateLocations; - - UpdateMStorageDescriptorTblURIRetVal(List badRecords, - Map updateLocations) { - this.badRecords = badRecords; - this.updateLocations = updateLocations; - } - - public List getBadRecords() { - return badRecords; - } - - public void setBadRecords(List badRecords) { - this.badRecords = badRecords; - } - - public Map getUpdateLocations() { - return updateLocations; - } - - public void setUpdateLocations(Map updateLocations) { - this.updateLocations = updateLocations; - } - } - - /** The following APIs - * - * - updateMStorageDescriptorTblURI - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
- * - */ - public UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI(URI oldLoc, URI newLoc, - boolean isDryRun) { - boolean committed = false; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); - UpdateMStorageDescriptorTblURIRetVal retVal = null; - - try { - openTransaction(); - Query query = pm.newQuery(MStorageDescriptor.class); - List mSDSs = (List) query.execute(); - pm.retrieveAll(mSDSs); - - for(MStorageDescriptor mSDS:mSDSs) { - URI locationURI = null; - String location = mSDS.getLocation(); - try { - locationURI = new URI(location); - } catch (URISyntaxException e) { - badRecords.add(location); - } catch (NullPointerException e) { - badRecords.add(location); - } - if (locationURI == null) { - badRecords.add(location); - } else { - if (shouldUpdateURI(locationURI, oldLoc)) { - String tblLoc = mSDS.getLocation().replaceAll(oldLoc.toString(), newLoc.toString()); - updateLocations.put(locationURI.toString(), tblLoc); - if (!isDryRun) { - mSDS.setLocation(tblLoc); - } - } - } - } - committed = commitTransaction(); - if (committed) { - retVal = new UpdateMStorageDescriptorTblURIRetVal(badRecords, updateLocations); - } - return retVal; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - public class UpdateSerdeURIRetVal { - private List badRecords; - private Map updateLocations; - - UpdateSerdeURIRetVal(List badRecords, Map updateLocations) { - this.badRecords = badRecords; - this.updateLocations = updateLocations; - } - - public List getBadRecords() { - return badRecords; - } - - public void setBadRecords(List badRecords) { - this.badRecords = badRecords; - } - - public Map getUpdateLocations() { - return updateLocations; - } - - public void setUpdateLocations(Map updateLocations) { - this.updateLocations = updateLocations; - } - } - - /** The following APIs - * - * - updateSerdeURI - * - * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
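// updateMDatabaseURI / updateMStorageDescriptorTbl*URI / updateSerdeURI above
// all follow the same shape: scan every stored location, collect unparseable
// ones as "bad records", compute the old->new rewrite for the ones that match,
// and only mutate the persisted object when the call is not a dry run. A
// condensed sketch of that loop over plain strings; the returned map plus the
// badRecords list mirrors the badRecords/updateLocations pair carried by the
// RetVal classes above.
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class DryRunRewriteSketch {
  static Map<String, String> planRewrites(List<String> locations, URI oldLoc, URI newLoc,
      List<String> badRecords) {
    Map<String, String> updates = new HashMap<>();
    for (String location : locations) {
      URI uri;
      try {
        uri = new URI(location);
      } catch (URISyntaxException | NullPointerException e) {
        badRecords.add(location);          // keep it for the report, skip the rewrite
        continue;
      }
      if (uri.getHost() != null && uri.getHost().equalsIgnoreCase(oldLoc.getHost())) {
        updates.put(location, location.replaceAll(oldLoc.toString(), newLoc.toString()));
      }
    }
    return updates;   // a dry run stops here; a real run also writes these back
  }

  public static void main(String[] args) throws Exception {
    List<String> bad = new ArrayList<>();
    Map<String, String> plan = planRewrites(
        List.of("hdfs://oldnn:8020/warehouse/t1", ":::not-a-uri"),
        new URI("hdfs://oldnn:8020"), new URI("hdfs://newnn:8020"), bad);
    System.out.println(plan);  // {hdfs://oldnn:8020/warehouse/t1=hdfs://newnn:8020/warehouse/t1}
    System.out.println(bad);   // [:::not-a-uri]
  }
}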
- * - */ - public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, String serdeProp, - boolean isDryRun) { - boolean committed = false; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); - UpdateSerdeURIRetVal retVal = null; - - try { - openTransaction(); - Query query = pm.newQuery(MSerDeInfo.class); - List mSerdes = (List) query.execute(); - pm.retrieveAll(mSerdes); - - for(MSerDeInfo mSerde:mSerdes) { - if (mSerde.getParameters().containsKey(serdeProp)) { - String schemaLoc = mSerde.getParameters().get(serdeProp); - URI schemaLocURI = null; - try { - schemaLocURI = new URI(schemaLoc); - } catch (URISyntaxException e) { - badRecords.add(schemaLoc); - } catch (NullPointerException e) { - badRecords.add(schemaLoc); - } - if (schemaLocURI == null) { - badRecords.add(schemaLoc); - } else { - if (shouldUpdateURI(schemaLocURI, oldLoc)) { - String newSchemaLoc = schemaLoc.replaceAll(oldLoc.toString(), newLoc.toString()); - updateLocations.put(schemaLocURI.toString(), newSchemaLoc); - if (!isDryRun) { - mSerde.getParameters().put(serdeProp, newSchemaLoc); - } - } - } - } - } - committed = commitTransaction(); - if (committed) { - retVal = new UpdateSerdeURIRetVal(badRecords, updateLocations); - } - return retVal; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - private void writeMTableColumnStatistics(Table table, MTableColumnStatistics mStatsObj) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - String dbName = mStatsObj.getDbName(); - String tableName = mStatsObj.getTableName(); - String colName = mStatsObj.getColName(); - - LOG.info("Updating table level column statistics for db=" + dbName + " tableName=" + tableName - + " colName=" + colName); - validateTableCols(table, Lists.newArrayList(colName)); - - List oldStats = - getMTableColumnStatistics(table, Lists.newArrayList(colName)); - - if (!oldStats.isEmpty()) { - assert oldStats.size() == 1; - StatObjectConverter.setFieldsIntoOldStats(mStatsObj, oldStats.get(0)); - } else { - pm.makePersistent(mStatsObj); - } - } - - private void writeMPartitionColumnStatistics(Table table, Partition partition, - MPartitionColumnStatistics mStatsObj) throws NoSuchObjectException, - MetaException, InvalidObjectException, InvalidInputException { - String dbName = mStatsObj.getDbName(); - String tableName = mStatsObj.getTableName(); - String partName = mStatsObj.getPartitionName(); - String colName = mStatsObj.getColName(); - - LOG.info("Updating partition level column statistics for db=" + dbName + " tableName=" + - tableName + " partName=" + partName + " colName=" + colName); - - boolean foundCol = false; - List colList = partition.getSd().getCols(); - for (FieldSchema col : colList) { - if (col.getName().equals(mStatsObj.getColName().trim())) { - foundCol = true; - break; - } - } - - if (!foundCol) { - throw new - NoSuchObjectException("Column " + colName + - " for which stats gathering is requested doesn't exist."); - } - - List oldStats = getMPartitionColumnStatistics( - table, Lists.newArrayList(partName), Lists.newArrayList(colName)); - if (!oldStats.isEmpty()) { - assert oldStats.size() == 1; - StatObjectConverter.setFieldsIntoOldStats(mStatsObj, oldStats.get(0)); - } else { - pm.makePersistent(mStatsObj); - } - } - - @Override - public boolean updateTableColumnStatistics(ColumnStatistics colStats) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean committed = false; - - openTransaction(); - 
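// writeMTableColumnStatistics / writeMPartitionColumnStatistics above are an
// update-or-insert: if a stats row for that column already exists, the new
// values are copied into the attached object (so JDO flushes an UPDATE);
// otherwise the fresh object is made persistent (an INSERT). A generic sketch
// of that pattern -- StatsRow and copyInto are placeholders, not Hive classes:
import java.util.List;
import javax.jdo.PersistenceManager;

final class UpsertStatsSketch {
  static void writeStats(PersistenceManager pm, List<StatsRow> existing, StatsRow fresh) {
    if (!existing.isEmpty()) {
      copyInto(existing.get(0), fresh);   // mutate the managed row; commit persists it
    } else {
      pm.makePersistent(fresh);           // brand-new row
    }
  }
  static void copyInto(StatsRow target, StatsRow source) {
    target.numNulls = source.numNulls;
    target.numDVs = source.numDVs;
  }
  /** Placeholder for an MTableColumnStatistics-style row. */
  static class StatsRow { long numNulls; long numDVs; }
}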
try { - List statsObjs = colStats.getStatsObj(); - ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); - - // DataNucleus objects get detached all over the place for no (real) reason. - // So let's not use them anywhere unless absolutely necessary. - Table table = ensureGetTable(statsDesc.getDbName(), statsDesc.getTableName()); - for (ColumnStatisticsObj statsObj:statsObjs) { - // We have to get mtable again because DataNucleus. - MTableColumnStatistics mStatsObj = StatObjectConverter.convertToMTableColumnStatistics( - ensureGetMTable(statsDesc.getDbName(), statsDesc.getTableName()), statsDesc, statsObj); - writeMTableColumnStatistics(table, mStatsObj); - } - committed = commitTransaction(); - return committed; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - @Override - public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List partVals) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean committed = false; - - try { - openTransaction(); - List statsObjs = colStats.getStatsObj(); - ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); - Table table = ensureGetTable(statsDesc.getDbName(), statsDesc.getTableName()); - Partition partition = convertToPart(getMPartition( - statsDesc.getDbName(), statsDesc.getTableName(), partVals)); - for (ColumnStatisticsObj statsObj:statsObjs) { - // We have to get partition again because DataNucleus - MPartition mPartition = getMPartition( - statsDesc.getDbName(), statsDesc.getTableName(), partVals); - if (partition == null) { - throw new NoSuchObjectException("Partition for which stats is gathered doesn't exist."); - } - MPartitionColumnStatistics mStatsObj = - StatObjectConverter.convertToMPartitionColumnStatistics(mPartition, statsDesc, statsObj); - writeMPartitionColumnStatistics(table, partition, mStatsObj); - } - committed = commitTransaction(); - return committed; - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - private List getMTableColumnStatistics( - Table table, List colNames) throws MetaException { - boolean committed = false; - openTransaction(); - try { - List result = null; - validateTableCols(table, colNames); - - Query query = pm.newQuery(MTableColumnStatistics.class); - String filter = "tableName == t1 && dbName == t2 && ("; - String paramStr = "java.lang.String t1, java.lang.String t2"; - Object[] params = new Object[colNames.size() + 2]; - params[0] = table.getTableName(); - params[1] = table.getDbName(); - for (int i = 0; i < colNames.size(); ++i) { - filter += ((i == 0) ? 
"" : " || ") + "colName == c" + i; - paramStr += ", java.lang.String c" + i; - params[i + 2] = colNames.get(i); - } - filter += ")"; - query.setFilter(filter); - query.declareParameters(paramStr); - result = (List) query.executeWithArray(params); - pm.retrieveAll(result); - if (result.size() > colNames.size()) { - throw new MetaException( - "Unexpected " + result.size() + " statistics for " + colNames.size() + " columns"); - } - committed = commitTransaction(); - return result; - } catch (Exception ex) { - LOG.error("Error retrieving statistics via jdo", ex); - if (ex instanceof MetaException) { - throw (MetaException)ex; - } - throw new MetaException(ex.getMessage()); - } finally { - if (!committed) { - rollbackTransaction(); - return Lists.newArrayList(); - } - } - } - - private void validateTableCols(Table table, List colNames) throws MetaException { - List colList = table.getSd().getCols(); - for (String colName : colNames) { - boolean foundCol = false; - for (FieldSchema mCol : colList) { - if (mCol.getName().equals(colName.trim())) { - foundCol = true; - break; - } - } - if (!foundCol) { - throw new MetaException("Column " + colName + " doesn't exist."); - } - } - } - - @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, - List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatisticsInternal(dbName, tableName, colNames, true, true); - } - - protected ColumnStatistics getTableColumnStatisticsInternal( - String dbName, String tableName, final List colNames, boolean allowSql, - boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetStatHelper(dbName.toLowerCase(), tableName.toLowerCase(), allowSql, allowJdo) { - @Override - protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getTableStats(dbName, tblName, colNames); - } - @Override - protected ColumnStatistics getJdoResult( - GetHelper ctx) throws MetaException, NoSuchObjectException { - List mStats = getMTableColumnStatistics(getTable(), colNames); - if (mStats.isEmpty()) return null; - // LastAnalyzed is stored per column, but thrift object has it per multiple columns. - // Luckily, nobody actually uses it, so we will set to lowest value of all columns for now. 
- ColumnStatisticsDesc desc = StatObjectConverter.getTableColumnStatisticsDesc(mStats.get(0)); - List statObjs = new ArrayList(mStats.size()); - for (MTableColumnStatistics mStat : mStats) { - if (desc.getLastAnalyzed() > mStat.getLastAnalyzed()) { - desc.setLastAnalyzed(mStat.getLastAnalyzed()); - } - statObjs.add(StatObjectConverter.getTableColumnStatisticsObj(mStat)); - } - return new ColumnStatistics(desc, statObjs); - } - }.run(true); - } - - @Override - public List getPartitionColumnStatistics(String dbName, String tableName, - List partNames, List colNames) throws MetaException, NoSuchObjectException { - return getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, true, true); - } - - protected List getPartitionColumnStatisticsInternal( - String dbName, String tableName, final List partNames, final List colNames, - boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tableName, allowSql, allowJdo) { - @Override - protected List getSqlResult( - GetHelper> ctx) throws MetaException { - return directSql.getPartitionStats(dbName, tblName, partNames, colNames); - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - List mStats = - getMPartitionColumnStatistics(getTable(), partNames, colNames); - List result = new ArrayList( - Math.min(mStats.size(), partNames.size())); - String lastPartName = null; - List curList = null; - ColumnStatisticsDesc csd = null; - for (int i = 0; i <= mStats.size(); ++i) { - boolean isLast = i == mStats.size(); - MPartitionColumnStatistics mStatsObj = isLast ? null : mStats.get(i); - String partName = isLast ? null : (String)mStatsObj.getPartitionName(); - if (isLast || !partName.equals(lastPartName)) { - if (i != 0) { - result.add(new ColumnStatistics(csd, curList)); - } - if (isLast) { - continue; - } - csd = StatObjectConverter.getPartitionColumnStatisticsDesc(mStatsObj); - curList = new ArrayList(colNames.size()); - } - curList.add(StatObjectConverter.getPartitionColumnStatisticsObj(mStatsObj)); - lastPartName = partName; - } - return result; - } - }.run(true); - } - - private List getMPartitionColumnStatistics( - Table table, List partNames, List colNames) - throws NoSuchObjectException, MetaException { - boolean committed = false; - MPartitionColumnStatistics mStatsObj = null; - try { - openTransaction(); - // We are not going to verify SD for each partition. Just verify for the table. - validateTableCols(table, colNames); - boolean foundCol = false; - Query query = pm.newQuery(MPartitionColumnStatistics.class); - String paramStr = "java.lang.String t1, java.lang.String t2"; - String filter = "tableName == t1 && dbName == t2 && ("; - Object[] params = new Object[colNames.size() + partNames.size() + 2]; - int i = 0; - params[i++] = table.getTableName(); - params[i++] = table.getDbName(); - int firstI = i; - for (String s : partNames) { - filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + i; - paramStr += ", java.lang.String p" + i; - params[i++] = s; - } - filter += ") && ("; - firstI = i; - for (String s : colNames) { - filter += ((i == firstI) ? 
"" : " || ") + "colName == c" + i; - paramStr += ", java.lang.String c" + i; - params[i++] = s; - } - filter += ")"; - query.setFilter(filter); - query.declareParameters(paramStr); - query.setOrdering("partitionName ascending"); - @SuppressWarnings("unchecked") - List result = - (List) query.executeWithArray(params); - pm.retrieveAll(result); - committed = commitTransaction(); - return result; - } catch (Exception ex) { - LOG.error("Error retrieving statistics via jdo", ex); - if (ex instanceof MetaException) { - throw (MetaException)ex; - } - throw new MetaException(ex.getMessage()); - } finally { - if (!committed) { - rollbackTransaction(); - return Lists.newArrayList(); - } - } - } - - private void dropPartitionColumnStatisticsNoTxn( - String dbName, String tableName, List partNames) throws MetaException { - ObjectPair queryWithParams = makeQueryByPartitionNames( - dbName, tableName, partNames, MPartitionColumnStatistics.class, - "tableName", "dbName", "partition.partitionName"); - queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); - } - - @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, - String partName, List partVals, String colName) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean ret = false; - - if (dbName == null) { - dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; - } - - if (tableName == null) { - throw new InvalidInputException("Table name is null."); - } - - try { - openTransaction(); - MTable mTable = getMTable(dbName, tableName); - MPartitionColumnStatistics mStatsObj; - List mStatsObjColl; - - if (mTable == null) { - throw new - NoSuchObjectException("Table " + tableName + - " for which stats deletion is requested doesn't exist"); - } - - MPartition mPartition = - getMPartition(dbName, tableName, partVals); - - if (mPartition == null) { - throw new - NoSuchObjectException("Partition " + partName + - " for which stats deletion is requested doesn't exist"); - } - - Query query = pm.newQuery(MPartitionColumnStatistics.class); - String filter; - String parameters; - - if (colName != null) { - filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && " + - "colName == t4"; - parameters = "java.lang.String t1, java.lang.String t2, " + - "java.lang.String t3, java.lang.String t4"; - } else { - filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; - } - - query.setFilter(filter); - query - .declareParameters(parameters); - - if (colName != null) { - query.setUnique(true); - mStatsObj = (MPartitionColumnStatistics)query.executeWithArray(partName.trim(), - dbName.trim(), tableName.trim(), colName.trim()); - pm.retrieve(mStatsObj); - - if (mStatsObj != null) { - pm.deletePersistent(mStatsObj); - } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" +dbName + " table=" - + tableName + " partition=" + partName + " col=" + colName); - } - } else { - mStatsObjColl= (List)query.execute(partName.trim(), - dbName.trim(), tableName.trim()); - pm.retrieveAll(mStatsObjColl); - - if (mStatsObjColl != null) { - pm.deletePersistentAll(mStatsObjColl); - } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + - " table=" + tableName + " partition" + partName); - } - } - ret = commitTransaction(); - } catch(NoSuchObjectException e) { - rollbackTransaction(); - throw e; - } finally { - if (!ret) { 
- rollbackTransaction(); - } - } - return ret; - } - - @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException - { - boolean ret = false; - - if (dbName == null) { - dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; - } - - if (tableName == null) { - throw new InvalidInputException("Table name is null."); - } - - try { - openTransaction(); - MTable mTable = getMTable(dbName, tableName); - MTableColumnStatistics mStatsObj; - List mStatsObjColl; - - if (mTable == null) { - throw new - NoSuchObjectException("Table " + tableName + - " for which stats deletion is requested doesn't exist"); - } - - Query query = pm.newQuery(MTableColumnStatistics.class); - String filter; - String parameters; - - if (colName != null) { - filter = "table.tableName == t1 && dbName == t2 && colName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; - } else { - filter = "table.tableName == t1 && dbName == t2"; - parameters = "java.lang.String t1, java.lang.String t2"; - } - - query.setFilter(filter); - query - .declareParameters(parameters); - - if (colName != null) { - query.setUnique(true); - mStatsObj = (MTableColumnStatistics)query.execute(tableName.trim(), - dbName.trim(), colName.trim()); - pm.retrieve(mStatsObj); - - if (mStatsObj != null) { - pm.deletePersistent(mStatsObj); - } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" +dbName + " table=" - + tableName + " col=" + colName); - } - } else { - mStatsObjColl= (List)query.execute(tableName.trim(), dbName.trim()); - pm.retrieveAll(mStatsObjColl); - - if (mStatsObjColl != null) { - pm.deletePersistentAll(mStatsObjColl); - } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + - " table=" + tableName); - } - } - ret = commitTransaction(); - } catch(NoSuchObjectException e) { - rollbackTransaction(); - throw e; - } finally { - if (!ret) { - rollbackTransaction(); - } - } - return ret; - } - - @Override - public long cleanupEvents() { - boolean commited = false; - long delCnt; - LOG.debug("Begin executing cleanupEvents"); - Long expiryTime = HiveConf.getLongVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION) * 1000L; - Long curTime = System.currentTimeMillis(); - try { - openTransaction(); - Query query = pm.newQuery(MPartitionEvent.class,"curTime - eventTime > expiryTime"); - query.declareParameters("java.lang.Long curTime, java.lang.Long expiryTime"); - delCnt = query.deletePersistentAll(curTime, expiryTime); - commited = commitTransaction(); - } - finally { - if (!commited) { - rollbackTransaction(); - } - LOG.debug("Done executing cleanupEvents"); - } - return delCnt; - } - - private MDelegationToken getTokenFrom(String tokenId) { - Query query = pm.newQuery(MDelegationToken.class, "tokenIdentifier == tokenId"); - query.declareParameters("java.lang.String tokenId"); - query.setUnique(true); - return (MDelegationToken)query.execute(tokenId); - } - - @Override - public boolean addToken(String tokenId, String delegationToken) { - - LOG.debug("Begin executing addToken"); - boolean committed = false; - MDelegationToken token; - try{ - openTransaction(); - token = getTokenFrom(tokenId); - if (token == null) { - // add Token, only if it already doesn't exist - pm.makePersistent(new MDelegationToken(tokenId, delegationToken)); - } - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - 
} - } - LOG.debug("Done executing addToken with status : " + committed); - return committed && (token == null); - } - - @Override - public boolean removeToken(String tokenId) { - - LOG.debug("Begin executing removeToken"); - boolean committed = false; - MDelegationToken token; - try{ - openTransaction(); - token = getTokenFrom(tokenId); - if (null != token) { - pm.deletePersistent(token); - } - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing removeToken with status : " + committed); - return committed && (token != null); - } - - @Override - public String getToken(String tokenId) { - - LOG.debug("Begin executing getToken"); - boolean committed = false; - MDelegationToken token; - try{ - openTransaction(); - token = getTokenFrom(tokenId); - if (null != token) { - pm.retrieve(token); - } - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing getToken with status : " + committed); - return (null == token) ? null : token.getTokenStr(); - } - - @Override - public List getAllTokenIdentifiers() { - - LOG.debug("Begin executing getAllTokenIdentifiers"); - boolean committed = false; - List tokens; - try{ - openTransaction(); - Query query = pm.newQuery(MDelegationToken.class); - tokens = (List) query.execute(); - pm.retrieveAll(tokens); - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing getAllTokenIdentifers with status : " + committed); - List tokenIdents = new ArrayList(tokens.size()); - - for (MDelegationToken token : tokens) { - tokenIdents.add(token.getTokenIdentifier()); - } - return tokenIdents; - } - - @Override - public int addMasterKey(String key) throws MetaException{ - LOG.debug("Begin executing addMasterKey"); - boolean committed = false; - MMasterKey masterKey = new MMasterKey(key); - try{ - openTransaction(); - pm.makePersistent(masterKey); - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing addMasterKey with status : " + committed); - if (committed) { - return ((IntIdentity)pm.getObjectId(masterKey)).getKey(); - } else { - throw new MetaException("Failed to add master key."); - } - } - - @Override - public void updateMasterKey(Integer id, String key) throws NoSuchObjectException, MetaException { - LOG.debug("Begin executing updateMasterKey"); - boolean committed = false; - MMasterKey masterKey; - try{ - openTransaction(); - Query query = pm.newQuery(MMasterKey.class, "keyId == id"); - query.declareParameters("java.lang.Integer id"); - query.setUnique(true); - masterKey = (MMasterKey)query.execute(id); - if (null != masterKey) { - masterKey.setMasterKey(key); - } - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing updateMasterKey with status : " + committed); - if (null == masterKey) { - throw new NoSuchObjectException("No key found with keyId: " + id); - } - if (!committed) { - throw new MetaException("Though key is found, failed to update it. 
" + id); - } - } - - @Override - public boolean removeMasterKey(Integer id) { - LOG.debug("Begin executing removeMasterKey"); - boolean success = false; - MMasterKey masterKey; - try{ - openTransaction(); - Query query = pm.newQuery(MMasterKey.class, "keyId == id"); - query.declareParameters("java.lang.Integer id"); - query.setUnique(true); - masterKey = (MMasterKey)query.execute(id); - if (null != masterKey) { - pm.deletePersistent(masterKey); - } - success = commitTransaction(); - } finally { - if(!success) { - rollbackTransaction(); - } - } - LOG.debug("Done executing removeMasterKey with status : " + success); - return (null != masterKey) && success; - } - - @Override - public String[] getMasterKeys() { - LOG.debug("Begin executing getMasterKeys"); - boolean committed = false; - List keys; - try{ - openTransaction(); - Query query = pm.newQuery(MMasterKey.class); - keys = (List) query.execute(); - pm.retrieveAll(keys); - committed = commitTransaction(); - } finally { - if(!committed) { - rollbackTransaction(); - } - } - LOG.debug("Done executing getMasterKeys with status : " + committed); - String[] masterKeys = new String[keys.size()]; - - for (int i = 0; i < keys.size(); i++) { - masterKeys[i] = keys.get(i).getMasterKey(); - } - return masterKeys; - } - - // compare hive version and metastore version - @Override - public void verifySchema() throws MetaException { - // If the schema version is already checked, then go ahead and use this metastore - if (isSchemaVerified.get()) { - return; - } - checkSchema(); - } - - private synchronized void checkSchema() throws MetaException { - // recheck if it got verified by another thread while we were waiting - if (isSchemaVerified.get()) { - return; - } - - boolean strictValidation = - HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); - // read the schema version stored in metastore db - String schemaVer = getMetaStoreSchemaVersion(); - if (schemaVer == null) { - // metastore has no schema version information - if (strictValidation) { - throw new MetaException("Version information not found in metastore. "); - } else { - LOG.warn("Version information not found in metastore. 
" - + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString() + - " is not enabled so recording the schema version " + - MetaStoreSchemaInfo.getHiveSchemaVersion()); - setMetaStoreSchemaVersion(MetaStoreSchemaInfo.getHiveSchemaVersion(), - "Set by MetaStore"); - } - } else { - // metastore schema version is different than Hive distribution needs - if (strictValidation) { - if (!schemaVer.equalsIgnoreCase(MetaStoreSchemaInfo.getHiveSchemaVersion())) { - throw new MetaException("Hive Schema version " - + MetaStoreSchemaInfo.getHiveSchemaVersion() + - " does not match metastore's schema version " + schemaVer + - " Metastore is not upgraded or corrupt"); - } else { - LOG.warn("Metastore version was " + schemaVer + " " + - HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString() + - " is not enabled so recording the new schema version " + - MetaStoreSchemaInfo.getHiveSchemaVersion()); - setMetaStoreSchemaVersion(MetaStoreSchemaInfo.getHiveSchemaVersion(), - "Set by MetaStore"); - } - } - } - isSchemaVerified.set(true); - return; - } - - // load the schema version stored in metastore db - @Override - public String getMetaStoreSchemaVersion() throws MetaException { - - MVersionTable mSchemaVer; - try { - mSchemaVer = getMSchemaVersion(); - } catch (NoSuchObjectException e) { - return null; - } - return mSchemaVer.getSchemaVersion(); - } - - @SuppressWarnings("unchecked") - private MVersionTable getMSchemaVersion() - throws NoSuchObjectException, MetaException { - boolean committed = false; - List mVerTables = new ArrayList(); - - try { - openTransaction(); - Query query = pm.newQuery(MVersionTable.class); - - try { - mVerTables = (List)query.execute(); - pm.retrieveAll(mVerTables); - } catch (JDODataStoreException e) { - if (e.getCause() instanceof MissingTableException) { - throw new MetaException("Version table not found. " + - "The metastore is not upgraded to " + MetaStoreSchemaInfo.getHiveSchemaVersion()); - } else { - throw e; - } - } - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - } - } - if (mVerTables.isEmpty()) { - throw new NoSuchObjectException("No matching version found"); - } - if (mVerTables.size() > 1) { - throw new MetaException("Metastore contains multiple versions"); - } - return mVerTables.get(0); - } - - @Override - public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException { - MVersionTable mSchemaVer; - boolean commited = false; - - try { - mSchemaVer = getMSchemaVersion(); - } catch (NoSuchObjectException e) { - // if the version doesn't exist, then create it - mSchemaVer = new MVersionTable(); - } - - mSchemaVer.setSchemaVersion(schemaVersion); - mSchemaVer.setVersionComment(comment); - try { - openTransaction(); - pm.makePersistent(mSchemaVer); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - } - - @Override - public boolean doesPartitionExist(String dbName, String tableName, List partVals) - throws MetaException { - boolean success = false; - try { - openTransaction(); - dbName = dbName.toLowerCase().trim(); - tableName = tableName.toLowerCase().trim(); - - // TODO: this could also be passed from upper layer; or this method should filter the list. 
- MTable mtbl = getMTable(dbName, tableName); - if (mtbl == null) { - success = commitTransaction(); - return false; - } - - Query query = pm.newQuery( - "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - query.setResult("partitionName"); - String name = Warehouse.makePartName( - convertToFieldSchemas(mtbl.getPartitionKeys()), partVals); - String result = (String)query.execute(tableName, dbName, name); - success = commitTransaction(); - return result != null; - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - - private void debugLog(String message) { - if (LOG.isDebugEnabled()) { - LOG.debug(message + getCallStack()); - } - } - - private static final int stackLimit = 5; - - private String getCallStack() { - StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); - int thislimit = Math.min(stackLimit, stackTrace.length); - StringBuilder sb = new StringBuilder(); - sb.append(" at:"); - for (int i = 4; i < thislimit; i++) { - sb.append("\n\t"); - sb.append(stackTrace[i].toString()); - } - return sb.toString(); - } - - private Function convertToFunction(MFunction mfunc) { - if (mfunc == null) { - return null; - } - - Function func = new Function(mfunc.getFunctionName(), - mfunc.getDatabase().getName(), - mfunc.getClassName(), - mfunc.getOwnerName(), - PrincipalType.valueOf(mfunc.getOwnerType()), - mfunc.getCreateTime(), - FunctionType.findByValue(mfunc.getFunctionType()), - convertToResourceUriList(mfunc.getResourceUris())); - return func; - } - - private MFunction convertToMFunction(Function func) throws InvalidObjectException { - if (func == null) { - return null; - } - - MDatabase mdb = null; - try { - mdb = getMDatabase(func.getDbName()); - } catch (NoSuchObjectException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new InvalidObjectException("Database " + func.getDbName() + " doesn't exist."); - } - - MFunction mfunc = new MFunction(func.getFunctionName(), - mdb, - func.getClassName(), - func.getOwnerName(), - func.getOwnerType().name(), - func.getCreateTime(), - func.getFunctionType().getValue(), - convertToMResourceUriList(func.getResourceUris())); - return mfunc; - } - - private List convertToResourceUriList(List mresourceUriList) { - List resourceUriList = null; - if (mresourceUriList != null) { - resourceUriList = new ArrayList(mresourceUriList.size()); - for (MResourceUri mres : mresourceUriList) { - resourceUriList.add( - new ResourceUri(ResourceType.findByValue(mres.getResourceType()), mres.getUri())); - } - } - return resourceUriList; - } - - private List convertToMResourceUriList(List resourceUriList) { - List mresourceUriList = null; - if (resourceUriList != null) { - mresourceUriList = new ArrayList(resourceUriList.size()); - for (ResourceUri res : resourceUriList) { - mresourceUriList.add(new MResourceUri(res.getResourceType().getValue(), res.getUri())); - } - } - return mresourceUriList; - } - - @Override - public void createFunction(Function func) throws InvalidObjectException, MetaException { - boolean committed = false; - try { - openTransaction(); - MFunction mfunc = convertToMFunction(func); - pm.makePersistent(mfunc); - committed = commitTransaction(); - } finally { - if (!committed) { - rollbackTransaction(); - } - } - } - - @Override - public void alterFunction(String dbName, String 
funcName, Function newFunction) - throws InvalidObjectException, MetaException { - boolean success = false; - try { - openTransaction(); - funcName = funcName.toLowerCase(); - dbName = dbName.toLowerCase(); - MFunction newf = convertToMFunction(newFunction); - if (newf == null) { - throw new InvalidObjectException("new function is invalid"); - } - - MFunction oldf = getMFunction(dbName, funcName); - if (oldf == null) { - throw new MetaException("function " + funcName + " doesn't exist"); - } - - // For now only alter name, owner, class name, type - oldf.setFunctionName(newf.getFunctionName().toLowerCase()); - oldf.setDatabase(newf.getDatabase()); - oldf.setOwnerName(newf.getOwnerName()); - oldf.setOwnerType(newf.getOwnerType()); - oldf.setClassName(newf.getClassName()); - oldf.setFunctionType(newf.getFunctionType()); - - // commit the changes - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - @Override - public void dropFunction(String dbName, String funcName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean success = false; - try { - openTransaction(); - MFunction mfunc = getMFunction(dbName, funcName); - pm.retrieve(mfunc); - if (mfunc != null) { - // TODO: When function privileges are implemented, they should be deleted here. - pm.deletePersistentAll(mfunc); - } - success = commitTransaction(); - } finally { - if (!success) { - rollbackTransaction(); - } - } - } - - private MFunction getMFunction(String db, String function) { - MFunction mfunc = null; - boolean commited = false; - try { - openTransaction(); - db = db.toLowerCase().trim(); - function = function.toLowerCase().trim(); - Query query = pm.newQuery(MFunction.class, "functionName == function && database.name == db"); - query.declareParameters("java.lang.String function, java.lang.String db"); - query.setUnique(true); - mfunc = (MFunction) query.execute(function, db); - pm.retrieve(mfunc); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return mfunc; - } - - @Override - public Function getFunction(String dbName, String funcName) throws MetaException { - boolean commited = false; - Function func = null; - try { - openTransaction(); - func = convertToFunction(getMFunction(dbName, funcName)); - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return func; - } - - @Override - public List getFunctions(String dbName, String pattern) - throws MetaException { - boolean commited = false; - List funcs = null; - try { - openTransaction(); - dbName = dbName.toLowerCase().trim(); - // Take the pattern and split it on the | to get all the composing - // patterns - String[] subpatterns = pattern.trim().split("\\|"); - String query = - "select functionName from org.apache.hadoop.hive.metastore.model.MFunction " - + "where database.name == dbName && ("; - boolean first = true; - for (String subpattern : subpatterns) { - subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); - if (!first) { - query = query + " || "; - } - query = query + " functionName.matches(\"" + subpattern + "\")"; - first = false; - } - query = query + ")"; - - Query q = pm.newQuery(query); - q.declareParameters("java.lang.String dbName"); - q.setResult("functionName"); - q.setOrdering("functionName ascending"); - Collection names = (Collection) q.execute(dbName); - funcs = new ArrayList(); - for (Iterator i = names.iterator(); i.hasNext();) { - 
funcs.add((String) i.next()); - } - commited = commitTransaction(); - } finally { - if (!commited) { - rollbackTransaction(); - } - } - return funcs; - } + private static Properties prop = null; + private static PersistenceManagerFactory pmf = null; + + private static Lock pmfPropLock = new ReentrantLock(); + private static final Log LOG = LogFactory.getLog(ObjectStore.class + .getName()); + + private static enum TXN_STATUS { + NO_STATE, OPEN, COMMITED, ROLLBACK + } + + private static final Map PINCLASSMAP; + static { + Map map = new HashMap(); + map.put("table", MTable.class); + map.put("storagedescriptor", MStorageDescriptor.class); + map.put("serdeinfo", MSerDeInfo.class); + map.put("partition", MPartition.class); + map.put("database", MDatabase.class); + map.put("type", MType.class); + map.put("fieldschema", MFieldSchema.class); + map.put("order", MOrder.class); + PINCLASSMAP = Collections.unmodifiableMap(map); + } + + private boolean isInitialized = false; + private PersistenceManager pm = null; + private MetaStoreDirectSql directSql = null; + private PartitionExpressionProxy expressionProxy = null; + private Configuration hiveConf; + int openTrasactionCalls = 0; + private Transaction currentTransaction = null; + private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE; + private final AtomicBoolean isSchemaVerified = new AtomicBoolean(false); + + public ObjectStore() { + } + + @Override + public Configuration getConf() { + return hiveConf; + } + + /** + * Called whenever this object is instantiated using ReflectionUils, and + * also on connection retries. In cases of connection retries, conf will + * usually contain modified values. + */ + @Override + @SuppressWarnings("nls") + public void setConf(Configuration conf) { + // Although an instance of ObjectStore is accessed by one thread, there + // may + // be many threads with ObjectStore instances. So the static variables + // pmf and prop need to be protected with locks. + pmfPropLock.lock(); + try { + isInitialized = false; + hiveConf = conf; + Properties propsFromConf = getDataSourceProps(conf); + boolean propsChanged = !propsFromConf.equals(prop); + + if (propsChanged) { + pmf = null; + prop = null; + } + + assert (!isActiveTransaction()); + shutdown(); + // Always want to re-create pm as we don't know if it were created + // by the + // most recent instance of the pmf + pm = null; + directSql = null; + expressionProxy = null; + openTrasactionCalls = 0; + currentTransaction = null; + transactionStatus = TXN_STATUS.NO_STATE; + + initialize(propsFromConf); + + if (!isInitialized) { + throw new RuntimeException( + "Unable to create persistence manager. Check dss.log for details"); + } else { + LOG.info("Initialized ObjectStore"); + } + } finally { + pmfPropLock.unlock(); + } + } + + private ClassLoader classLoader; + { + classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = ObjectStore.class.getClassLoader(); + } + } + + @SuppressWarnings("nls") + private void initialize(Properties dsProps) { + LOG.info("ObjectStore, initialize called"); + prop = dsProps; + pm = getPersistenceManager(); + isInitialized = pm != null; + if (isInitialized) { + expressionProxy = createExpressionProxy(hiveConf); + directSql = new MetaStoreDirectSql(pm); + } + } + + /** + * Creates the proxy used to evaluate expressions. This is here to prevent + * circular dependency - ql -> metastore client <-> metastore server + * -> ql. If server and client are split, this can be removed. 
+ * + * @param conf + * Configuration. + * @return The partition expression proxy. + */ + private static PartitionExpressionProxy createExpressionProxy( + Configuration conf) { + String className = HiveConf.getVar(conf, + HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + try { + @SuppressWarnings("unchecked") + Class clazz = (Class) MetaStoreUtils + .getClass(className); + return MetaStoreUtils.newInstance(clazz, new Class[0], + new Object[0]); + } catch (MetaException e) { + LOG.error("Error loading PartitionExpressionProxy", e); + throw new RuntimeException( + "Error loading PartitionExpressionProxy: " + e.getMessage()); + } + } + + /** + * Properties specified in hive-default.xml override the properties + * specified in jpox.properties. + */ + @SuppressWarnings("nls") + private static Properties getDataSourceProps(Configuration conf) { + Properties prop = new Properties(); + + Iterator> iter = conf.iterator(); + while (iter.hasNext()) { + Map.Entry e = iter.next(); + if (e.getKey().contains("datanucleus") + || e.getKey().contains("jdo")) { + Object prevVal = prop.setProperty(e.getKey(), + conf.get(e.getKey())); + if (LOG.isDebugEnabled() + && !e.getKey().equals( + HiveConf.ConfVars.METASTOREPWD.varname)) { + LOG.debug("Overriding " + e.getKey() + " value " + prevVal + + " from jpox.properties with " + e.getValue()); + } + } + } + + if (LOG.isDebugEnabled()) { + for (Entry e : prop.entrySet()) { + if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { + LOG.debug(e.getKey() + " = " + e.getValue()); + } + } + } + return prop; + } + + private static synchronized PersistenceManagerFactory getPMF() { + if (pmf == null) { + pmf = JDOHelper.getPersistenceManagerFactory(prop); + DataStoreCache dsc = pmf.getDataStoreCache(); + if (dsc != null) { + HiveConf conf = new HiveConf(ObjectStore.class); + String objTypes = HiveConf.getVar(conf, + HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES); + LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"" + + objTypes + "\""); + if (objTypes != null && objTypes.length() > 0) { + objTypes = objTypes.toLowerCase(); + String[] typeTokens = objTypes.split(","); + for (String type : typeTokens) { + type = type.trim(); + if (PINCLASSMAP.containsKey(type)) { + dsc.pinAll(true, PINCLASSMAP.get(type)); + } else { + LOG.warn(type + + " is not one of the pinnable object types: " + + org.apache.commons.lang.StringUtils.join( + PINCLASSMAP.keySet(), " ")); + } + } + } + } else { + LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. 
Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes"); + } + } + return pmf; + } + + @InterfaceAudience.LimitedPrivate({ "HCATALOG" }) + @InterfaceStability.Evolving + public PersistenceManager getPersistenceManager() { + return getPMF().getPersistenceManager(); + } + + @Override + public void shutdown() { + if (pm != null) { + pm.close(); + } + } + + /** + * Opens a new one or the one already created Every call of this function + * must have corresponding commit or rollback function call + * + * @return an active transaction + */ + + @Override + public boolean openTransaction() { + openTrasactionCalls++; + if (openTrasactionCalls == 1) { + currentTransaction = pm.currentTransaction(); + currentTransaction.begin(); + transactionStatus = TXN_STATUS.OPEN; + } else { + // something is wrong since openTransactionCalls is greater than 1 + // but + // currentTransaction is not active + assert ((currentTransaction != null) && (currentTransaction + .isActive())); + } + + boolean result = currentTransaction.isActive(); + debugLog("Open transaction: count = " + openTrasactionCalls + + ", isActive = " + result); + return result; + } + + /** + * if this is the commit of the first open call then an actual commit is + * called. + * + * @return Always returns true + */ + @Override + @SuppressWarnings("nls") + public boolean commitTransaction() { + if (TXN_STATUS.ROLLBACK == transactionStatus) { + debugLog("Commit transaction: rollback"); + return false; + } + if (openTrasactionCalls <= 0) { + RuntimeException e = new RuntimeException( + "commitTransaction was called but openTransactionCalls = " + + openTrasactionCalls + + ". This probably indicates that there are unbalanced " + + "calls to openTransaction/commitTransaction"); + LOG.error(e); + throw e; + } + if (!currentTransaction.isActive()) { + RuntimeException e = new RuntimeException( + "commitTransaction was called but openTransactionCalls = " + + openTrasactionCalls + + ". This probably indicates that there are unbalanced " + + "calls to openTransaction/commitTransaction"); + LOG.error(e); + throw e; + } + openTrasactionCalls--; + debugLog("Commit transaction: count = " + openTrasactionCalls + + ", isactive " + currentTransaction.isActive()); + + if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { + transactionStatus = TXN_STATUS.COMMITED; + currentTransaction.commit(); + } + + return true; + } + + /** + * @return true if there is an active transaction. 
If the current + * transaction is either committed or rolled back it returns false + */ + public boolean isActiveTransaction() { + if (currentTransaction == null) { + return false; + } + return currentTransaction.isActive(); + } + + /** + * Rolls back the current transaction if it is active + */ + @Override + public void rollbackTransaction() { + if (openTrasactionCalls < 1) { + debugLog("rolling back transaction: no open transactions: " + + openTrasactionCalls); + return; + } + openTrasactionCalls = 0; + debugLog("Rollback transaction, isActive: " + + currentTransaction.isActive()); + if (currentTransaction.isActive() + && transactionStatus != TXN_STATUS.ROLLBACK) { + transactionStatus = TXN_STATUS.ROLLBACK; + // could already be rolled back + currentTransaction.rollback(); + // remove all detached objects from the cache, since the transaction + // is + // being rolled back they are no longer relevant, and this prevents + // them + // from reattaching in future transactions + pm.evictAll(); + } + } + + @Override + public void createDatabase(Database db) throws InvalidObjectException, + MetaException { + boolean commited = false; + MDatabase mdb = new MDatabase(); + mdb.setName(db.getName().toLowerCase()); + mdb.setLocationUri(db.getLocationUri()); + mdb.setDescription(db.getDescription()); + mdb.setParameters(db.getParameters()); + mdb.setOwnerName(db.getOwnerName()); + PrincipalType ownerType = db.getOwnerType(); + mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() + : ownerType.name())); + try { + openTransaction(); + pm.makePersistent(mdb); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + } + + @SuppressWarnings("nls") + private MDatabase getMDatabase(String name) throws NoSuchObjectException { + MDatabase mdb = null; + boolean commited = false; + try { + openTransaction(); + name = name.toLowerCase().trim(); + Query query = pm.newQuery(MDatabase.class, "name == dbname"); + query.declareParameters("java.lang.String dbname"); + query.setUnique(true); + mdb = (MDatabase) query.execute(name); + pm.retrieve(mdb); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + if (mdb == null) { + throw new NoSuchObjectException("There is no database named " + + name); + } + return mdb; + } + + @Override + public Database getDatabase(String name) throws NoSuchObjectException { + MDatabase mdb = null; + boolean commited = false; + try { + openTransaction(); + mdb = getMDatabase(name); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + Database db = new Database(); + db.setName(mdb.getName()); + db.setDescription(mdb.getDescription()); + db.setLocationUri(mdb.getLocationUri()); + db.setParameters(mdb.getParameters()); + db.setOwnerName(mdb.getOwnerName()); + String type = mdb.getOwnerType(); + db.setOwnerType((null == type || type.trim().isEmpty()) ? null + : PrincipalType.valueOf(type)); + return db; + } + + /** + * Alter the database object in metastore. Currently only the parameters of + * the database or the owner can be changed. 
+ * + * @param dbName + * the database name + * @param db + * the Hive Database object + * @throws MetaException + * @throws NoSuchObjectException + */ + @Override + public boolean alterDatabase(String dbName, Database db) + throws MetaException, NoSuchObjectException { + + MDatabase mdb = null; + boolean committed = false; + try { + mdb = getMDatabase(dbName); + mdb.setParameters(db.getParameters()); + mdb.setOwnerName(db.getOwnerName()); + if (db.getOwnerType() != null) { + mdb.setOwnerType(db.getOwnerType().name()); + } + openTransaction(); + pm.makePersistent(mdb); + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + return false; + } + } + return true; + } + + @Override + public boolean dropDatabase(String dbname) throws NoSuchObjectException, + MetaException { + boolean success = false; + LOG.info("Dropping database " + dbname + " along with all tables"); + dbname = dbname.toLowerCase(); + try { + openTransaction(); + + // then drop the database + MDatabase db = getMDatabase(dbname); + pm.retrieve(db); + if (db != null) { + List dbGrants = this.listDatabaseGrants(dbname); + if (dbGrants != null && dbGrants.size() > 0) { + pm.deletePersistentAll(dbGrants); + } + pm.deletePersistent(db); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public List getDatabases(String pattern) throws MetaException { + boolean commited = false; + List databases = null; + try { + openTransaction(); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); + String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where ("; + boolean first = true; + for (String subpattern : subpatterns) { + subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); + if (!first) { + query = query + " || "; + } + query = query + " name.matches(\"" + subpattern + "\")"; + first = false; + } + query = query + ")"; + + Query q = pm.newQuery(query); + q.setResult("name"); + q.setOrdering("name ascending"); + Collection names = (Collection) q.execute(); + databases = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + databases.add((String) i.next()); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return databases; + } + + @Override + public List getAllDatabases() throws MetaException { + return getDatabases(".*"); + } + + private MType getMType(Type type) { + List fields = new ArrayList(); + if (type.getFields() != null) { + for (FieldSchema field : type.getFields()) { + fields.add(new MFieldSchema(field.getName(), field.getType(), + field.getComment())); + } + } + return new MType(type.getName(), type.getType1(), type.getType2(), + fields); + } + + private Type getType(MType mtype) { + List fields = new ArrayList(); + if (mtype.getFields() != null) { + for (MFieldSchema field : mtype.getFields()) { + fields.add(new FieldSchema(field.getName(), field.getType(), + field.getComment())); + } + } + Type ret = new Type(); + ret.setName(mtype.getName()); + ret.setType1(mtype.getType1()); + ret.setType2(mtype.getType2()); + ret.setFields(fields); + return ret; + } + + @Override + public boolean createType(Type type) { + boolean success = false; + MType mtype = getMType(type); + boolean commited = false; + try { + openTransaction(); + pm.makePersistent(mtype); + commited = commitTransaction(); + success = true; + } finally { 
+ if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public Type getType(String typeName) { + Type type = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MType.class, "name == typeName"); + query.declareParameters("java.lang.String typeName"); + query.setUnique(true); + MType mtype = (MType) query.execute(typeName.trim()); + pm.retrieve(type); + if (mtype != null) { + type = getType(mtype); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return type; + } + + @Override + public boolean dropType(String typeName) { + boolean success = false; + try { + openTransaction(); + Query query = pm.newQuery(MType.class, "name == typeName"); + query.declareParameters("java.lang.String typeName"); + query.setUnique(true); + MType type = (MType) query.execute(typeName.trim()); + pm.retrieve(type); + if (type != null) { + pm.deletePersistent(type); + } + success = commitTransaction(); + } catch (JDOObjectNotFoundException e) { + success = commitTransaction(); + LOG.debug("type not found " + typeName, e); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public void createTable(Table tbl) throws InvalidObjectException, + MetaException { + boolean commited = false; + try { + openTransaction(); + MTable mtbl = convertToMTable(tbl); + pm.makePersistent(mtbl); + PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); + List toPersistPrivObjs = new ArrayList(); + if (principalPrivs != null) { + int now = (int) (System.currentTimeMillis() / 1000); + + Map> userPrivs = principalPrivs + .getUserPrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, + userPrivs, PrincipalType.USER); + + Map> groupPrivs = principalPrivs + .getGroupPrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, + groupPrivs, PrincipalType.GROUP); + + Map> rolePrivs = principalPrivs + .getRolePrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, + rolePrivs, PrincipalType.ROLE); + } + pm.makePersistentAll(toPersistPrivObjs); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + } + + /** + * Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all + * of them to the toPersistPrivObjs. These privilege objects will be + * persisted as part of createTable. 
+ * + * @param mtbl + * @param toPersistPrivObjs + * @param now + * @param privMap + * @param type + */ + private void putPersistentPrivObjects(MTable mtbl, + List toPersistPrivObjs, int now, + Map> privMap, PrincipalType type) { + if (privMap != null) { + for (Map.Entry> entry : privMap + .entrySet()) { + String principalName = entry.getKey(); + List privs = entry.getValue(); + for (int i = 0; i < privs.size(); i++) { + PrivilegeGrantInfo priv = privs.get(i); + if (priv == null) { + continue; + } + MTablePrivilege mTblSec = new MTablePrivilege( + principalName, type.toString(), mtbl, + priv.getPrivilege(), now, priv.getGrantor(), priv + .getGrantorType().toString(), + priv.isGrantOption()); + toPersistPrivObjs.add(mTblSec); + } + } + } + } + + @Override + public boolean dropTable(String dbName, String tableName) + throws MetaException, NoSuchObjectException, + InvalidObjectException, InvalidInputException { + boolean success = false; + try { + openTransaction(); + MTable tbl = getMTable(dbName, tableName); + pm.retrieve(tbl); + if (tbl != null) { + // first remove all the grants + List tabGrants = listAllTableGrants(dbName, + tableName); + if (tabGrants != null && tabGrants.size() > 0) { + pm.deletePersistentAll(tabGrants); + } + List tblColGrants = listTableAllColumnGrants( + dbName, tableName); + if (tblColGrants != null && tblColGrants.size() > 0) { + pm.deletePersistentAll(tblColGrants); + } + + List partGrants = this + .listTableAllPartitionGrants(dbName, tableName); + if (partGrants != null && partGrants.size() > 0) { + pm.deletePersistentAll(partGrants); + } + + List partColGrants = listTableAllPartitionColumnGrants( + dbName, tableName); + if (partColGrants != null && partColGrants.size() > 0) { + pm.deletePersistentAll(partColGrants); + } + // delete column statistics if present + try { + deleteTableColumnStatistics(dbName, tableName, null); + } catch (NoSuchObjectException e) { + LOG.info("Found no table level column statistics associated with db " + + dbName + + " table " + + tableName + + " record to delete"); + } + + preDropStorageDescriptor(tbl.getSd()); + // then remove the table + pm.deletePersistentAll(tbl); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public Table getTable(String dbName, String tableName) throws MetaException { + boolean commited = false; + Table tbl = null; + try { + openTransaction(); + tbl = convertToTable(getMTable(dbName, tableName)); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return tbl; + } + + @Override + public List getTables(String dbName, String pattern) + throws MetaException { + boolean commited = false; + List tbls = null; + try { + openTransaction(); + dbName = dbName.toLowerCase().trim(); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); + String query = "select tableName from org.apache.hadoop.hive.metastore.model.MTable " + + "where database.name == dbName && ("; + boolean first = true; + for (String subpattern : subpatterns) { + subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); + if (!first) { + query = query + " || "; + } + query = query + " tableName.matches(\"" + subpattern + "\")"; + first = false; + } + query = query + ")"; + + Query q = pm.newQuery(query); + q.declareParameters("java.lang.String dbName"); + q.setResult("tableName"); + q.setOrdering("tableName ascending"); + Collection 
names = (Collection) q.execute(dbName); + tbls = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + tbls.add((String) i.next()); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return tbls; + } + + @Override + public List getAllTables(String dbName) throws MetaException { + return getTables(dbName, ".*"); + } + + private MTable getMTable(String db, String table) { + MTable mtbl = null; + boolean commited = false; + try { + openTransaction(); + db = db.toLowerCase().trim(); + table = table.toLowerCase().trim(); + Query query = pm.newQuery(MTable.class, + "tableName == table && database.name == db"); + query.declareParameters("java.lang.String table, java.lang.String db"); + query.setUnique(true); + mtbl = (MTable) query.execute(table, db); + pm.retrieve(mtbl); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mtbl; + } + + @Override + public List
<Table> getTableObjectsByName(String db, List<String> tbl_names) + throws MetaException, UnknownDBException { + List<Table>
tables = new ArrayList<Table>
(); + boolean committed = false; + try { + openTransaction(); + + db = db.toLowerCase().trim(); + Query dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); + dbExistsQuery.declareParameters("java.lang.String db"); + dbExistsQuery.setUnique(true); + dbExistsQuery.setResult("name"); + String dbNameIfExists = (String) dbExistsQuery.execute(db); + if (dbNameIfExists == null || dbNameIfExists.isEmpty()) { + throw new UnknownDBException("Could not find database " + db); + } + + List lowered_tbl_names = new ArrayList(); + for (String t : tbl_names) { + lowered_tbl_names.add(t.toLowerCase().trim()); + } + Query query = pm.newQuery(MTable.class); + query.setFilter("database.name == db && tbl_names.contains(tableName)"); + query.declareParameters("java.lang.String db, java.util.Collection tbl_names"); + Collection mtables = (Collection) query.execute(db, + lowered_tbl_names); + for (Iterator iter = mtables.iterator(); iter.hasNext();) { + tables.add(convertToTable((MTable) iter.next())); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return tables; + } + + /** + * Makes shallow copy of a list to avoid DataNucleus mucking with our + * objects. + */ + private List convertList(List dnList) { + return (dnList == null) ? null : Lists.newArrayList(dnList); + } + + /** + * Makes shallow copy of a map to avoid DataNucleus mucking with our + * objects. + */ + private Map convertMap(Map dnMap) { + return (dnMap == null) ? null : Maps.newHashMap(dnMap); + } + + private Table convertToTable(MTable mtbl) throws MetaException { + if (mtbl == null) { + return null; + } + String tableType = mtbl.getTableType(); + if (tableType == null) { + // for backwards compatibility with old metastore persistence + if (mtbl.getViewOriginalText() != null) { + tableType = TableType.VIRTUAL_VIEW.toString(); + } else if ("TRUE".equals(mtbl.getParameters().get("EXTERNAL"))) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } else { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), + mtbl.getOwner(), mtbl.getCreateTime(), + mtbl.getLastAccessTime(), mtbl.getRetention(), + convertToStorageDescriptor(mtbl.getSd()), + convertToFieldSchemas(mtbl.getPartitionKeys()), + convertMap(mtbl.getParameters()), mtbl.getViewOriginalText(), + mtbl.getViewExpandedText(), tableType); + } + + private MTable convertToMTable(Table tbl) throws InvalidObjectException, + MetaException { + if (tbl == null) { + return null; + } + MDatabase mdb = null; + try { + mdb = getMDatabase(tbl.getDbName()); + } catch (NoSuchObjectException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new InvalidObjectException("Database " + tbl.getDbName() + + " doesn't exist."); + } + + // If the table has property EXTERNAL set, update table type + // accordingly + String tableType = tbl.getTableType(); + boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL")); + if (TableType.MANAGED_TABLE.toString().equals(tableType)) { + if (isExternal) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } + } + if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { + if (!isExternal) { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + + // A new table is always created with a new column descriptor + return new MTable(tbl.getTableName().toLowerCase(), mdb, + convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), + tbl.getCreateTime(), tbl.getLastAccessTime(), + tbl.getRetention(), + 
convertToMFieldSchemas(tbl.getPartitionKeys()), + tbl.getParameters(), tbl.getViewOriginalText(), + tbl.getViewExpandedText(), tableType); + } + + private List convertToMFieldSchemas(List keys) { + List mkeys = null; + if (keys != null) { + mkeys = new ArrayList(keys.size()); + for (FieldSchema part : keys) { + mkeys.add(new MFieldSchema(part.getName().toLowerCase(), part + .getType(), part.getComment())); + } + } + return mkeys; + } + + private List convertToFieldSchemas(List mkeys) { + List keys = null; + if (mkeys != null) { + keys = new ArrayList(mkeys.size()); + for (MFieldSchema part : mkeys) { + keys.add(new FieldSchema(part.getName(), part.getType(), part + .getComment())); + } + } + return keys; + } + + private List convertToMOrders(List keys) { + List mkeys = null; + if (keys != null) { + mkeys = new ArrayList(keys.size()); + for (Order part : keys) { + mkeys.add(new MOrder(part.getCol().toLowerCase(), part + .getOrder())); + } + } + return mkeys; + } + + private List convertToOrders(List mkeys) { + List keys = null; + if (mkeys != null) { + keys = new ArrayList(mkeys.size()); + for (MOrder part : mkeys) { + keys.add(new Order(part.getCol(), part.getOrder())); + } + } + return keys; + } + + private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException { + if (ms == null) { + throw new MetaException("Invalid SerDeInfo object"); + } + return new SerDeInfo(ms.getName(), ms.getSerializationLib(), + convertMap(ms.getParameters())); + } + + private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException { + if (ms == null) { + throw new MetaException("Invalid SerDeInfo object"); + } + return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), + ms.getParameters()); + } + + /** + * Given a list of model field schemas, create a new model column + * descriptor. + * + * @param cols + * the columns the column descriptor contains + * @return a new column descriptor db-backed object + */ + private MColumnDescriptor createNewMColumnDescriptor(List cols) { + if (cols == null) { + return null; + } + return new MColumnDescriptor(cols); + } + + // MSD and SD should be same objects. Not sure how to make then same right + // now + // MSerdeInfo *& SerdeInfo should be same as well + private StorageDescriptor convertToStorageDescriptor( + MStorageDescriptor msd, boolean noFS) throws MetaException { + if (msd == null) { + return null; + } + List mFieldSchemas = msd.getCD() == null ? null : msd + .getCD().getCols(); + + StorageDescriptor sd = new StorageDescriptor(noFS ? 
null + : convertToFieldSchemas(mFieldSchemas), msd.getLocation(), + msd.getInputFormat(), msd.getOutputFormat(), + msd.isCompressed(), msd.getNumBuckets(), + converToSerDeInfo(msd.getSerDeInfo()), + convertList(msd.getBucketCols()), + convertToOrders(msd.getSortCols()), + convertMap(msd.getParameters())); + SkewedInfo skewedInfo = new SkewedInfo( + convertList(msd.getSkewedColNames()), + convertToSkewedValues(msd.getSkewedColValues()), + covertToSkewedMap(msd.getSkewedColValueLocationMaps())); + sd.setSkewedInfo(skewedInfo); + sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories()); + return sd; + } + + private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) + throws MetaException { + return convertToStorageDescriptor(msd, false); + } + + /** + * Convert a list of MStringList to a list of list string + * + * @param mLists + * @return + */ + private List> convertToSkewedValues(List mLists) { + List> lists = null; + if (mLists != null) { + lists = new ArrayList>(mLists.size()); + for (MStringList element : mLists) { + lists.add(new ArrayList(element.getInternalList())); + } + } + return lists; + } + + private List convertToMStringLists(List> mLists) { + List lists = null; + if (null != mLists) { + lists = new ArrayList(); + for (List mList : mLists) { + lists.add(new MStringList(mList)); + } + } + return lists; + } + + /** + * Convert a MStringList Map to a Map + * + * @param mMap + * @return + */ + private Map, String> covertToSkewedMap( + Map mMap) { + Map, String> map = null; + if (mMap != null) { + map = new HashMap, String>(mMap.size()); + Set keys = mMap.keySet(); + for (MStringList key : keys) { + map.put(new ArrayList(key.getInternalList()), + mMap.get(key)); + } + } + return map; + } + + /** + * Covert a Map to a MStringList Map + * + * @param mMap + * @return + */ + private Map covertToMapMStringList( + Map, String> mMap) { + Map map = null; + if (mMap != null) { + map = new HashMap(mMap.size()); + Set> keys = mMap.keySet(); + for (List key : keys) { + map.put(new MStringList(key), mMap.get(key)); + } + } + return map; + } + + /** + * Converts a storage descriptor to a db-backed storage descriptor. Creates + * a new db-backed column descriptor object for this SD. + * + * @param sd + * the storage descriptor to wrap in a db-backed object + * @return the storage descriptor db-backed object + * @throws MetaException + */ + private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) + throws MetaException { + if (sd == null) { + return null; + } + MColumnDescriptor mcd = createNewMColumnDescriptor(convertToMFieldSchemas(sd + .getCols())); + return convertToMStorageDescriptor(sd, mcd); + } + + /** + * Converts a storage descriptor to a db-backed storage descriptor. It + * points the storage descriptor's column descriptor to the one passed as an + * argument, so it does not create a new mcolumn descriptor object. 
+ * + * @param sd + * the storage descriptor to wrap in a db-backed object + * @param mcd + * the db-backed column descriptor + * @return the db-backed storage descriptor object + * @throws MetaException + */ + private MStorageDescriptor convertToMStorageDescriptor( + StorageDescriptor sd, MColumnDescriptor mcd) throws MetaException { + if (sd == null) { + return null; + } + return new MStorageDescriptor(mcd, sd.getLocation(), + sd.getInputFormat(), sd.getOutputFormat(), sd.isCompressed(), + sd.getNumBuckets(), converToMSerDeInfo(sd.getSerdeInfo()), + sd.getBucketCols(), convertToMOrders(sd.getSortCols()), + sd.getParameters(), (null == sd.getSkewedInfo()) ? null : sd + .getSkewedInfo().getSkewedColNames(), + convertToMStringLists((null == sd.getSkewedInfo()) ? null : sd + .getSkewedInfo().getSkewedColValues()), + covertToMapMStringList((null == sd.getSkewedInfo()) ? null : sd + .getSkewedInfo().getSkewedColValueLocationMaps()), + sd.isStoredAsSubDirectories()); + } + + @Override + public boolean addPartitions(String dbName, String tblName, + List parts) throws InvalidObjectException, MetaException { + boolean success = false; + openTransaction(); + try { + List tabGrants = null; + List tabColumnGrants = null; + MTable table = this.getMTable(dbName, tblName); + if ("TRUE".equalsIgnoreCase(table.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))) { + tabGrants = this.listAllTableGrants(dbName, tblName); + tabColumnGrants = this + .listTableAllColumnGrants(dbName, tblName); + } + List toPersist = new ArrayList(); + for (Partition part : parts) { + if (!part.getTableName().equals(tblName) + || !part.getDbName().equals(dbName)) { + throw new MetaException( + "Partition does not belong to target table " + + dbName + "." + tblName + ": " + part); + } + MPartition mpart = convertToMPart(part, true); + toPersist.add(mpart); + int now = (int) (System.currentTimeMillis() / 1000); + if (tabGrants != null) { + for (MTablePrivilege tab : tabGrants) { + toPersist.add(new MPartitionPrivilege(tab + .getPrincipalName(), tab.getPrincipalType(), + mpart, tab.getPrivilege(), now, tab + .getGrantor(), tab.getGrantorType(), + tab.getGrantOption())); + } + } + + if (tabColumnGrants != null) { + for (MTableColumnPrivilege col : tabColumnGrants) { + toPersist.add(new MPartitionColumnPrivilege(col + .getPrincipalName(), col.getPrincipalType(), + mpart, col.getColumnName(), col.getPrivilege(), + now, col.getGrantor(), col.getGrantorType(), + col.getGrantOption())); + } + } + } + if (toPersist.size() > 0) { + pm.makePersistentAll(toPersist); + } + + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public boolean addPartition(Partition part) throws InvalidObjectException, + MetaException { + boolean success = false; + boolean commited = false; + try { + MTable table = this + .getMTable(part.getDbName(), part.getTableName()); + List tabGrants = null; + List tabColumnGrants = null; + if ("TRUE".equalsIgnoreCase(table.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))) { + tabGrants = this.listAllTableGrants(part.getDbName(), + part.getTableName()); + tabColumnGrants = this.listTableAllColumnGrants( + part.getDbName(), part.getTableName()); + } + openTransaction(); + MPartition mpart = convertToMPart(part, true); + pm.makePersistent(mpart); + + int now = (int) (System.currentTimeMillis() / 1000); + List toPersist = new ArrayList(); + if (tabGrants != null) { + for (MTablePrivilege tab : tabGrants) { + MPartitionPrivilege partGrant = 
new MPartitionPrivilege( + tab.getPrincipalName(), tab.getPrincipalType(), + mpart, tab.getPrivilege(), now, tab.getGrantor(), + tab.getGrantorType(), tab.getGrantOption()); + toPersist.add(partGrant); + } + } + + if (tabColumnGrants != null) { + for (MTableColumnPrivilege col : tabColumnGrants) { + MPartitionColumnPrivilege partColumn = new MPartitionColumnPrivilege( + col.getPrincipalName(), col.getPrincipalType(), + mpart, col.getColumnName(), col.getPrivilege(), + now, col.getGrantor(), col.getGrantorType(), + col.getGrantOption()); + toPersist.add(partColumn); + } + + if (toPersist.size() > 0) { + pm.makePersistentAll(toPersist); + } + } + + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public Partition getPartition(String dbName, String tableName, + List part_vals) throws NoSuchObjectException, MetaException { + openTransaction(); + Partition part = convertToPart(getMPartition(dbName, tableName, + part_vals)); + commitTransaction(); + if (part == null) { + throw new NoSuchObjectException("partition values=" + + part_vals.toString()); + } + part.setValues(part_vals); + return part; + } + + private MPartition getMPartition(String dbName, String tableName, + List part_vals) throws MetaException { + MPartition mpart = null; + boolean commited = false; + try { + openTransaction(); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + MTable mtbl = getMTable(dbName, tableName); + if (mtbl == null) { + commited = commitTransaction(); + return null; + } + // Change the query to use part_vals instead of the name which is + // redundant TODO: callers of this often get part_vals out of name + // for no reason... + String name = Warehouse.makePartName( + convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); + Query query = pm + .newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + mpart = (MPartition) query.execute(tableName, dbName, name); + pm.retrieve(mpart); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mpart; + } + + /** + * Convert a Partition object into an MPartition, which is an object backed + * by the db If the Partition's set of columns is the same as the parent + * table's AND useTableCD is true, then this partition's storage + * descriptor's column descriptor will point to the same one as the table's + * storage descriptor. + * + * @param part + * the partition to convert + * @param useTableCD + * whether to try to use the parent table's column descriptor. 
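The conversion described above reuses the parent table's column descriptor whenever a partition declares exactly the same columns, so identical column lists are not persisted twice. Below is a minimal standalone sketch of that decision; `ColumnDescriptor` and `StorageDesc` are hypothetical simplified stand-ins, not the real metastore model classes.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

// Hypothetical, simplified stand-ins for the metastore model classes.
class ColumnDescriptor {
    final List<String> cols;
    ColumnDescriptor(List<String> cols) { this.cols = cols; }
}

class StorageDesc {
    ColumnDescriptor cd;
    StorageDesc(ColumnDescriptor cd) { this.cd = cd; }
}

public class CdReuseSketch {
    /**
     * Builds a storage descriptor for a partition, sharing the table's column
     * descriptor when the partition declares exactly the same columns.
     */
    static StorageDesc descriptorForPartition(List<String> partCols, StorageDesc tableSd) {
        if (tableSd != null && tableSd.cd != null && Objects.equals(tableSd.cd.cols, partCols)) {
            return new StorageDesc(tableSd.cd);                    // reuse, no duplicate column list
        }
        return new StorageDesc(new ColumnDescriptor(partCols));    // columns differ, new descriptor
    }

    public static void main(String[] args) {
        StorageDesc tableSd = new StorageDesc(new ColumnDescriptor(Arrays.asList("id", "name")));
        StorageDesc same = descriptorForPartition(Arrays.asList("id", "name"), tableSd);
        StorageDesc diff = descriptorForPartition(Arrays.asList("id"), tableSd);
        System.out.println(same.cd == tableSd.cd);  // true  -> descriptor shared with the table
        System.out.println(diff.cd == tableSd.cd);  // false -> partition got its own descriptor
    }
}
```

The same equality check on the converted field schemas is what decides, in the patch above, whether the table's existing CD is handed to the storage-descriptor conversion or a fresh one is created.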
+ * @return the model partition object + * @throws InvalidObjectException + * @throws MetaException + */ + private MPartition convertToMPart(Partition part, boolean useTableCD) + throws InvalidObjectException, MetaException { + if (part == null) { + return null; + } + MTable mt = getMTable(part.getDbName(), part.getTableName()); + if (mt == null) { + throw new InvalidObjectException( + "Partition doesn't have a valid table or database name"); + } + + // If this partition's set of columns is the same as the parent table's, + // use the parent table's, so we do not create a duplicate column + // descriptor, + // thereby saving space + MStorageDescriptor msd; + if (useTableCD + && mt.getSd() != null + && mt.getSd().getCD() != null + && mt.getSd().getCD().getCols() != null + && part.getSd() != null + && convertToFieldSchemas(mt.getSd().getCD().getCols()).equals( + part.getSd().getCols())) { + msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD()); + } else { + msd = convertToMStorageDescriptor(part.getSd()); + } + + return new MPartition( + Warehouse.makePartName( + convertToFieldSchemas(mt.getPartitionKeys()), + part.getValues()), mt, part.getValues(), + part.getCreateTime(), part.getLastAccessTime(), msd, + part.getParameters()); + } + + private Partition convertToPart(MPartition mpart) throws MetaException { + if (mpart == null) { + return null; + } + return new Partition(convertList(mpart.getValues()), mpart.getTable() + .getDatabase().getName(), mpart.getTable().getTableName(), + mpart.getCreateTime(), mpart.getLastAccessTime(), + convertToStorageDescriptor(mpart.getSd()), + convertMap(mpart.getParameters())); + } + + private Partition convertToPart(String dbName, String tblName, + MPartition mpart) throws MetaException { + if (mpart == null) { + return null; + } + return new Partition(convertList(mpart.getValues()), dbName, tblName, + mpart.getCreateTime(), mpart.getLastAccessTime(), + convertToStorageDescriptor(mpart.getSd(), false), + convertMap(mpart.getParameters())); + } + + @Override + public boolean dropPartition(String dbName, String tableName, + List part_vals) throws MetaException, + NoSuchObjectException, InvalidObjectException, + InvalidInputException { + boolean success = false; + try { + openTransaction(); + MPartition part = getMPartition(dbName, tableName, part_vals); + dropPartitionCommon(part); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public void dropPartitions(String dbName, String tblName, + List partNames) throws MetaException, NoSuchObjectException { + if (partNames.isEmpty()) + return; + boolean success = false; + openTransaction(); + try { + // Delete all things. + dropPartitionGrantsNoTxn(dbName, tblName, partNames); + dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames); + dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames); + + // CDs are reused; go thry partition SDs, detach all CDs from SDs, + // then remove unused CDs. + for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, + partNames)) { + removeUnusedColumnDescriptor(mcd); + } + dropPartitionsNoTxn(dbName, tblName, partNames); + if (!(success = commitTransaction())) { + throw new MetaException("Failed to drop partitions"); // Should + // not + // happen? + } + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + /** + * Drop an MPartition and cascade deletes (e.g., delete partition privilege + * grants, drop the storage descriptor cleanly, etc.) 
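The drop path described here deletes everything hanging off the partition before the partition row itself, and the whole cascade sits inside the open/commit/rollback-in-finally idiom used throughout this class. A print-only sketch of that shape follows; the helpers are stubs, not metastore APIs.

```java
/**
 * Print-only sketch: the cascade order from the description above, wrapped in
 * the open/commit/rollback-in-finally idiom used throughout this class.
 */
public class DropPartitionSketch {
    static boolean dropPartition(String partName) {
        boolean success = false;
        try {
            openTransaction();
            step("delete partition-level privilege grants for " + partName);
            step("delete partition column privilege grants for " + partName);
            step("delete partition-level column statistics for " + partName);
            step("detach the column descriptor, drop it if nothing else references it");
            step("delete the partition row and its storage descriptor");
            success = commitTransaction();
        } finally {
            if (!success) {
                rollbackTransaction();  // any failure above undoes the whole cascade
            }
        }
        return success;
    }

    static void openTransaction()      { step("open transaction"); }
    static boolean commitTransaction() { step("commit"); return true; }
    static void rollbackTransaction()  { step("rollback"); }
    static void step(String s)         { System.out.println(s); }

    public static void main(String[] args) {
        System.out.println("committed = " + dropPartition("ds=2013-01-01/hr=00"));
    }
}
```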
+ * + * @param part + * - the MPartition to drop + * @return whether the transaction committed successfully + * @throws InvalidInputException + * @throws InvalidObjectException + * @throws MetaException + * @throws NoSuchObjectException + */ + private boolean dropPartitionCommon(MPartition part) + throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + boolean success = false; + try { + openTransaction(); + if (part != null) { + List schemas = part.getTable().getPartitionKeys(); + List colNames = new ArrayList(); + for (MFieldSchema col : schemas) { + colNames.add(col.getName()); + } + String partName = FileUtils.makePartName(colNames, + part.getValues()); + + List partGrants = listPartitionGrants(part + .getTable().getDatabase().getName(), part.getTable() + .getTableName(), Lists.newArrayList(partName)); + + if (partGrants != null && partGrants.size() > 0) { + pm.deletePersistentAll(partGrants); + } + + List partColumnGrants = listPartitionAllColumnGrants( + part.getTable().getDatabase().getName(), part + .getTable().getTableName(), + Lists.newArrayList(partName)); + if (partColumnGrants != null && partColumnGrants.size() > 0) { + pm.deletePersistentAll(partColumnGrants); + } + + String dbName = part.getTable().getDatabase().getName(); + String tableName = part.getTable().getTableName(); + + // delete partition level column stats if it exists + try { + deletePartitionColumnStatistics(dbName, tableName, + partName, part.getValues(), null); + } catch (NoSuchObjectException e) { + LOG.info("No column statistics records found to delete"); + } + + preDropStorageDescriptor(part.getSd()); + pm.deletePersistent(part); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public List getPartitions(String dbName, String tableName, + int maxParts) throws MetaException, NoSuchObjectException { + return getPartitionsInternal(dbName, tableName, maxParts, true, true); + } + + protected List getPartitionsInternal(String dbName, + String tblName, final int maxParts, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { + return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + Integer max = (maxParts < 0) ? 
null : maxParts; + return directSql.getPartitions(dbName, tblName, max); + } + + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, + NoSuchObjectException { + return convertToParts(listMPartitions(dbName, tblName, maxParts)); + } + }.run(false); + } + + @Override + public List getPartitionsWithAuth(String dbName, String tblName, + short max, String userName, List groupNames) + throws MetaException, NoSuchObjectException, InvalidObjectException { + boolean success = false; + try { + openTransaction(); + List mparts = listMPartitions(dbName, tblName, max); + List parts = new ArrayList(mparts.size()); + if (mparts != null && mparts.size() > 0) { + for (MPartition mpart : mparts) { + MTable mtbl = mpart.getTable(); + Partition part = convertToPart(mpart); + parts.add(part); + + if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))) { + String partName = Warehouse.makePartName( + this.convertToFieldSchemas(mtbl + .getPartitionKeys()), part.getValues()); + PrincipalPrivilegeSet partAuth = this + .getPartitionPrivilegeSet(dbName, tblName, + partName, userName, groupNames); + part.setPrivileges(partAuth); + } + } + } + success = commitTransaction(); + return parts; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public Partition getPartitionWithAuth(String dbName, String tblName, + List partVals, String user_name, List group_names) + throws NoSuchObjectException, MetaException, InvalidObjectException { + boolean success = false; + try { + openTransaction(); + MPartition mpart = getMPartition(dbName, tblName, partVals); + if (mpart == null) { + commitTransaction(); + throw new NoSuchObjectException("partition values=" + + partVals.toString()); + } + Partition part = null; + MTable mtbl = mpart.getTable(); + part = convertToPart(mpart); + if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))) { + String partName = Warehouse.makePartName( + this.convertToFieldSchemas(mtbl.getPartitionKeys()), + partVals); + PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet( + dbName, tblName, partName, user_name, group_names); + part.setPrivileges(partAuth); + } + + success = commitTransaction(); + return part; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List convertToParts(List mparts) + throws MetaException { + return convertToParts(mparts, null); + } + + private List convertToParts(List src, + List dest) throws MetaException { + if (src == null) { + return dest; + } + if (dest == null) { + dest = new ArrayList(src.size()); + } + for (MPartition mp : src) { + dest.add(convertToPart(mp)); + } + return dest; + } + + private List convertToParts(String dbName, String tblName, + List mparts) throws MetaException { + List parts = new ArrayList(mparts.size()); + for (MPartition mp : mparts) { + parts.add(convertToPart(dbName, tblName, mp)); + } + return parts; + } + + // TODO:pc implement max + @Override + public List listPartitionNames(String dbName, String tableName, + short max) throws MetaException { + List pns = null; + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing getPartitionNames"); + pns = getPartitionNamesNoTxn(dbName, tableName, max); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return pns; + } + + private List getPartitionNamesNoTxn(String dbName, + String tableName, short max) { + List pns = new ArrayList(); + dbName = 
dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + Query q = pm + .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where table.database.name == t1 && table.tableName == t2 " + + "order by partitionName asc"); + q.declareParameters("java.lang.String t1, java.lang.String t2"); + q.setResult("partitionName"); + + if (max > 0) { + q.setRange(0, max); + } + Collection names = (Collection) q.execute(dbName, tableName); + for (Iterator i = names.iterator(); i.hasNext();) { + pns.add((String) i.next()); + } + return pns; + } + + /** + * Retrieves a Collection of partition-related results from the database + * that match the partial specification given for a specific table. + * + * @param dbName + * the name of the database + * @param tableName + * the name of the table + * @param part_vals + * the partial specification values + * @param max_parts + * the maximum number of partitions to return + * @param resultsCol + * the metadata column of the data to return, e.g. partitionName, + * etc. if resultsCol is empty or null, a collection of + * MPartition objects is returned + * @throws NoSuchObjectException + * @results A Collection of partition-related items from the db that match + * the partial spec for a table. The type of each item in the + * collection corresponds to the column you want results for. E.g., + * if resultsCol is partitionName, the Collection has types of + * String, and if resultsCol is null, the types are MPartition. + */ + private Collection getPartitionPsQueryResults(String dbName, + String tableName, List part_vals, short max_parts, + String resultsCol) throws MetaException, NoSuchObjectException { + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + Table table = getTable(dbName, tableName); + + if (table == null) { + throw new NoSuchObjectException(dbName + "." + tableName + + " table not found"); + } + + List partCols = table.getPartitionKeys(); + int numPartKeys = partCols.size(); + if (part_vals.size() > numPartKeys) { + throw new MetaException("Incorrect number of partition values"); + } + + partCols = partCols.subList(0, part_vals.size()); + // Construct a pattern of the form: + // partKey=partVal/partKey2=partVal2/... + // where partVal is either the escaped partition value given as input, + // or a regex of the form ".*" + // This works because the "=" and "/" separating key names and partition + // key/values + // are not escaped. + String partNameMatcher = Warehouse.makePartName(partCols, part_vals, + ".*"); + // add ".*" to the regex to match anything else afterwards the partial + // spec. 
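As the comment explains, a partial partition specification is turned into a partition-name pattern of the form `key=val/...` with `.*` standing in for unspecified values, and the stored `partitionName` strings are matched against it. The self-contained sketch below shows that matching with plain `java.util.regex`; value escaping is simplified here, whereas the real code builds the name through `Warehouse.makePartName` before appending the trailing `.*`.

```java
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class PartialSpecSketch {
    /** Builds a "ds=2013-01-01/.*"-style pattern and keeps only the matching names. */
    static List<String> filter(List<String> partNames, List<String> partKeys, List<String> partialVals) {
        StringBuilder regex = new StringBuilder();
        for (int i = 0; i < partialVals.size(); i++) {
            if (i > 0) {
                regex.append('/');
            }
            String val = partialVals.get(i);
            regex.append(partKeys.get(i)).append('=')
                 .append(val.isEmpty() ? ".*" : Pattern.quote(val));  // empty value -> wildcard
        }
        if (partialVals.size() < partKeys.size()) {
            regex.append(".*");  // match anything after the partial spec
        }
        Pattern p = Pattern.compile(regex.toString());
        return partNames.stream().filter(n -> p.matcher(n).matches()).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> names = Arrays.asList(
                "ds=2013-01-01/hr=00", "ds=2013-01-01/hr=01", "ds=2013-01-02/hr=00");
        // Only ds is specified; hr is left open, so both hours of 2013-01-01 match.
        System.out.println(filter(names, Arrays.asList("ds", "hr"), Arrays.asList("2013-01-01")));
    }
}
```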
+ if (part_vals.size() < numPartKeys) { + partNameMatcher += ".*"; + } + + Query q = pm.newQuery(MPartition.class); + StringBuilder queryFilter = new StringBuilder( + "table.database.name == dbName"); + queryFilter.append(" && table.tableName == tableName"); + queryFilter.append(" && partitionName.matches(partialRegex)"); + q.setFilter(queryFilter.toString()); + q.declareParameters("java.lang.String dbName, " + + "java.lang.String tableName, java.lang.String partialRegex"); + + if (max_parts >= 0) { + // User specified a row limit, set it on the Query + q.setRange(0, max_parts); + } + if (resultsCol != null && !resultsCol.isEmpty()) { + q.setResult(resultsCol); + } + + return (Collection) q.execute(dbName, tableName, partNameMatcher); + } + + @Override + public List listPartitionsPsWithAuth(String db_name, + String tbl_name, List part_vals, short max_parts, + String userName, List groupNames) throws MetaException, + InvalidObjectException, NoSuchObjectException { + List partitions = new ArrayList(); + boolean success = false; + try { + openTransaction(); + LOG.debug("executing listPartitionNamesPsWithAuth"); + Collection parts = getPartitionPsQueryResults(db_name, tbl_name, + part_vals, max_parts, null); + MTable mtbl = getMTable(db_name, tbl_name); + for (Object o : parts) { + Partition part = convertToPart((MPartition) o); + // set auth privileges + if (null != userName + && null != groupNames + && "TRUE".equalsIgnoreCase(mtbl.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))) { + String partName = Warehouse + .makePartName(this.convertToFieldSchemas(mtbl + .getPartitionKeys()), part.getValues()); + PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet( + db_name, tbl_name, partName, userName, groupNames); + part.setPrivileges(partAuth); + } + partitions.add(part); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return partitions; + } + + @Override + public List listPartitionNamesPs(String dbName, String tableName, + List part_vals, short max_parts) throws MetaException, + NoSuchObjectException { + List partitionNames = new ArrayList(); + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPartitionNamesPs"); + Collection names = getPartitionPsQueryResults(dbName, tableName, + part_vals, max_parts, "partitionName"); + for (Object o : names) { + partitionNames.add((String) o); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return partitionNames; + } + + // TODO:pc implement max + private List listMPartitions(String dbName, String tableName, + int max) { + + boolean success = false; + List mparts = null; + try { + openTransaction(); + LOG.debug("Executing listMPartitions"); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + Query query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.setOrdering("partitionName ascending"); + if (max > 0) { + query.setRange(0, max); + } + mparts = (List) query.execute(tableName, dbName); + LOG.debug("Done executing query for listMPartitions"); + pm.retrieveAll(mparts); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMPartitions " + + mparts); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mparts; + } + + @Override + public List getPartitionsByNames(String dbName, String tblName, + List partNames) 
throws MetaException, NoSuchObjectException { + return getPartitionsByNamesInternal(dbName, tblName, partNames, true, + true); + } + + protected List getPartitionsByNamesInternal(String dbName, + String tblName, final List partNames, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { + return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + return directSql.getPartitionsViaSqlFilter(dbName, tblName, + partNames, null); + } + + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, + NoSuchObjectException { + return getPartitionsViaOrmFilter(dbName, tblName, partNames); + } + }.run(false); + } + + @Override + public boolean getPartitionsByExpr(String dbName, String tblName, + byte[] expr, String defaultPartitionName, short maxParts, + List result) throws TException { + return getPartitionsByExprInternal(dbName, tblName, expr, + defaultPartitionName, maxParts, result, true, true); + } + + protected boolean getPartitionsByExprInternal(String dbName, + String tblName, final byte[] expr, + final String defaultPartitionName, final short maxParts, + List result, boolean allowSql, boolean allowJdo) + throws TException { + assert result != null; + + // We will try pushdown first, so make the filter. This will also + // validate the expression, + // if serialization fails we will throw incompatible metastore error to + // the client. + String filter = null; + try { + filter = expressionProxy.convertExprToFilter(expr); + } catch (MetaException ex) { + throw new IMetaStoreClient.IncompatibleMetastoreException( + ex.getMessage()); + } + + // Make a tree out of the filter. + // TODO: this is all pretty ugly. The only reason we need all these + // transformations + // is to maintain support for simple filters for HCat users that query + // metastore. + // If forcing everyone to use thick client is out of the question, maybe + // we could + // parse the filter into standard hive expressions and not all this + // separate tree + // Filter.g stuff. That way this method and ...ByFilter would just be + // merged. + final ExpressionTree exprTree = makeExpressionTree(filter); + + final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); + result.addAll(new GetListHelper(dbName, tblName, allowSql, + allowJdo) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + // If we have some sort of expression tree, try SQL filter + // pushdown. + List result = null; + if (exprTree != null) { + result = directSql.getPartitionsViaSqlFilter( + ctx.getTable(), exprTree, null); + } + if (result == null) { + // We couldn't do SQL filter pushdown. Get names via normal + // means. + List partNames = new LinkedList(); + hasUnknownPartitions + .set(getPartitionNamesPrunedByExprNoTxn( + ctx.getTable(), expr, defaultPartitionName, + maxParts, partNames)); + result = directSql.getPartitionsViaSqlFilter(dbName, + tblName, partNames, null); + } + return result; + } + + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, + NoSuchObjectException { + // If we have some sort of expression tree, try JDOQL filter + // pushdown. + List result = null; + if (exprTree != null) { + result = getPartitionsViaOrmFilter(ctx.getTable(), + exprTree, maxParts, false); + } + if (result == null) { + // We couldn't do JDOQL filter pushdown. Get names via + // normal means. 
+ List partNames = new ArrayList(); + hasUnknownPartitions + .set(getPartitionNamesPrunedByExprNoTxn( + ctx.getTable(), expr, defaultPartitionName, + maxParts, partNames)); + result = getPartitionsViaOrmFilter(dbName, tblName, + partNames); + } + return result; + } + }.run(true)); + return hasUnknownPartitions.get(); + } + + private class LikeChecker extends ExpressionTree.TreeVisitor { + private boolean hasLike; + + public boolean hasLike() { + return hasLike; + } + + @Override + protected boolean shouldStop() { + return hasLike; + } + + @Override + protected void visit(LeafNode node) throws MetaException { + hasLike = hasLike || (node.operator == Operator.LIKE); + } + } + + /** + * Makes expression tree out of expr. + * + * @param filter + * Filter. + * @return Expression tree. Null if there was an error. + */ + private ExpressionTree makeExpressionTree(String filter) + throws MetaException { + // TODO: ExprNodeDesc is an expression tree, we could just use that and + // be rid of Filter.g. + if (filter == null || filter.isEmpty()) { + return ExpressionTree.EMPTY_TREE; + } + LOG.debug("Filter specified is " + filter); + ExpressionTree tree = null; + try { + tree = getFilterParser(filter).tree; + } catch (MetaException ex) { + LOG.info("Unable to make the expression tree from expression string [" + + filter + "]" + ex.getMessage()); // Don't log the stack, + // this is normal. + } + if (tree == null) { + return null; + } + // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. + // Check for like here. + LikeChecker lc = new LikeChecker(); + tree.accept(lc); + return lc.hasLike() ? null : tree; + } + + /** + * Gets the partition names from a table, pruned using an expression. + * + * @param table + * Table. + * @param expr + * Expression. + * @param defaultPartName + * Default partition name from job config, if any. + * @param maxParts + * Maximum number of partition names to return. + * @param result + * The resulting names. + * @return Whether the result contains any unknown partitions. + */ + private boolean getPartitionNamesPrunedByExprNoTxn(Table table, + byte[] expr, String defaultPartName, short maxParts, + List result) throws MetaException { + result.addAll(getPartitionNamesNoTxn(table.getDbName(), + table.getTableName(), maxParts)); + List columnNames = new ArrayList(); + for (FieldSchema fs : table.getPartitionKeys()) { + columnNames.add(fs.getName()); + } + if (defaultPartName == null || defaultPartName.isEmpty()) { + defaultPartName = HiveConf.getVar(getConf(), + HiveConf.ConfVars.DEFAULTPARTITIONNAME); + } + return expressionProxy.filterPartitionsByExpr(columnNames, expr, + defaultPartName, result); + } + + /** + * Gets partition names from the table via ORM (JDOQL) filter pushdown. + * + * @param table + * The table. + * @param tree + * The expression tree from which JDOQL filter will be made. + * @param maxParts + * Maximum number of partitions to return. + * @param isValidatedFilter + * Whether the filter was pre-validated for JDOQL pushdown by a + * client (old hive client or non-hive one); if it was and we + * fail to create a filter, we will throw. + * @return Resulting partitions. Can be null if isValidatedFilter is false, + * and there was error deriving the JDO filter. 
+ */ + private List getPartitionsViaOrmFilter(Table table, + ExpressionTree tree, short maxParts, boolean isValidatedFilter) + throws MetaException { + Map params = new HashMap(); + String jdoFilter = makeQueryFilterString(table.getDbName(), table, + tree, params, isValidatedFilter); + if (jdoFilter == null) { + assert !isValidatedFilter; + return null; + } + Query query = pm.newQuery(MPartition.class, jdoFilter); + if (maxParts >= 0) { + // User specified a row limit, set it on the Query + query.setRange(0, maxParts); + } + + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setOrdering("partitionName ascending"); + + @SuppressWarnings("unchecked") + List mparts = (List) query + .executeWithMap(params); + + LOG.debug("Done executing query for getPartitionsViaOrmFilter"); + pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we + // get by names? + LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter"); + List results = convertToParts(mparts); + query.closeAll(); + return results; + } + + private static class Out { + public T val; + } + + /** + * Gets partition names from the table via ORM (JDOQL) name filter. + * + * @param dbName + * Database name. + * @param tblName + * Table name. + * @param partNames + * Partition names to get the objects for. + * @return Resulting partitions. + */ + private List getPartitionsViaOrmFilter(String dbName, + String tblName, List partNames) throws MetaException { + if (partNames.isEmpty()) { + return new ArrayList(); + } + Out query = new Out(); + List mparts = null; + try { + mparts = getMPartitionsViaOrmFilter(dbName, tblName, partNames, + query); + return convertToParts(dbName, tblName, mparts); + } finally { + if (query.val != null) { + query.val.closeAll(); + } + } + } + + private void dropPartitionsNoTxn(String dbName, String tblName, + List partNames) { + ObjectPair> queryWithParams = getPartQueryWithParams( + dbName, tblName, partNames); + Query query = queryWithParams.getFirst(); + query.setClass(MPartition.class); + long deleted = query.deletePersistentAll(queryWithParams.getSecond()); + LOG.debug("Deleted " + deleted + " partition from store"); + query.closeAll(); + } + + /** + * Detaches column descriptors from storage descriptors; returns the set of + * unique CDs thus detached. This is done before dropping partitions because + * CDs are reused between SDs; so, we remove the links to delete SDs and + * then check the returned CDs to see if they are referenced by other SDs. 
+ */ + private HashSet detachCdsFromSdsNoTxn(String dbName, + String tblName, List partNames) { + ObjectPair> queryWithParams = getPartQueryWithParams( + dbName, tblName, partNames); + Query query = queryWithParams.getFirst(); + query.setClass(MPartition.class); + query.setResult("sd"); + @SuppressWarnings("unchecked") + List sds = (List) query + .executeWithMap(queryWithParams.getSecond()); + HashSet candidateCds = new HashSet(); + for (MStorageDescriptor sd : sds) { + if (sd != null && sd.getCD() != null) { + candidateCds.add(sd.getCD()); + sd.setCD(null); + } + } + return candidateCds; + } + + private List getMPartitionsViaOrmFilter(String dbName, + String tblName, List partNames, Out out) { + ObjectPair> queryWithParams = getPartQueryWithParams( + dbName, tblName, partNames); + Query query = out.val = queryWithParams.getFirst(); + query.setResultClass(MPartition.class); + query.setClass(MPartition.class); + query.setOrdering("partitionName ascending"); + + @SuppressWarnings("unchecked") + List result = (List) query + .executeWithMap(queryWithParams.getSecond()); + return result; + } + + private ObjectPair> getPartQueryWithParams( + String dbName, String tblName, List partNames) { + StringBuilder sb = new StringBuilder( + "table.tableName == t1 && table.database.name == t2 && ("); + int n = 0; + Map params = new HashMap(); + for (Iterator itr = partNames.iterator(); itr.hasNext();) { + String pn = "p" + n; + n++; + String part = itr.next(); + params.put(pn, part); + sb.append("partitionName == ").append(pn); + sb.append(" || "); + } + sb.setLength(sb.length() - 4); // remove the last " || " + sb.append(')'); + + Query query = pm.newQuery(); + query.setFilter(sb.toString()); + + LOG.debug(" JDOQL filter is " + sb.toString()); + params.put("t1", tblName.trim().toLowerCase()); + params.put("t2", dbName.trim().toLowerCase()); + + query.declareParameters(makeParameterDeclarationString(params)); + return new ObjectPair>(query, params); + } + + @Override + public List getPartitionsByFilter(String dbName, String tblName, + String filter, short maxParts) throws MetaException, + NoSuchObjectException { + return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, + true, true); + } + + /** + * Helper class for getting stuff w/transaction, direct SQL, perf logging, + * etc. + */ + private abstract class GetHelper { + private final boolean isInTxn, doTrace, allowJdo; + private boolean doUseDirectSql; + private long start; + private Table table; + protected final String dbName, tblName; + private boolean success = false; + protected T results = null; + + public GetHelper(String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + assert allowSql || allowJdo; + this.allowJdo = allowJdo; + this.dbName = dbName.toLowerCase(); + this.tblName = tblName.toLowerCase(); + this.doTrace = LOG.isDebugEnabled(); + this.isInTxn = isActiveTransaction(); + + // SQL usage inside a larger transaction (e.g. droptable) may not be + // desirable because + // some databases (e.g. Postgres) abort the entire transaction when + // any query fails, so + // the fallback from failed SQL to JDO is not possible. 
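The comment above motivates the helper's core behaviour: attempt the direct-SQL path first and, unless direct SQL was the only option allowed, fall back to the JDO/ORM path when it fails, rolling the transaction back first when the helper owns it. Below is a reduced sketch of that control flow; the `Callable` arguments are hypothetical stand-ins for `getSqlResult`/`getJdoResult`, and transaction handling is reduced to a log line.

```java
import java.util.concurrent.Callable;

public class FallbackHelperSketch {
    /** Try the fast direct-SQL path; on failure fall back to the ORM path if allowed. */
    static <T> T run(Callable<T> directSql, Callable<T> jdo, boolean allowJdo) throws Exception {
        try {
            return directSql.call();  // fast path
        } catch (Exception ex) {
            if (!allowJdo) {
                throw ex;             // no fallback permitted, surface the error
            }
            System.out.println("Direct SQL failed, falling back to ORM: " + ex.getMessage());
            // The real helper also rolls back and reopens the transaction here
            // when it owns it, so the ORM attempt starts from a clean state.
            return jdo.call();
        }
    }

    public static void main(String[] args) throws Exception {
        String result = run(
                () -> { throw new IllegalStateException("SQL not compatible"); },
                () -> "result via JDO",
                true);
        System.out.println(result);
    }
}
```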
+ boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), + ConfVars.METASTORE_TRY_DIRECT_SQL) + && (HiveConf.getBoolVar(getConf(), + ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); + if (!allowJdo && isConfigEnabled + && !directSql.isCompatibleDatastore()) { + throw new MetaException("SQL is not operational"); // test path; + // SQL is + // enabled + // and + // broken. + } + this.doUseDirectSql = allowSql && isConfigEnabled + && directSql.isCompatibleDatastore(); + } + + protected abstract String describeResult(); + + protected abstract T getSqlResult(GetHelper ctx) + throws MetaException; + + protected abstract T getJdoResult(GetHelper ctx) + throws MetaException, NoSuchObjectException; + + public T run(boolean initTable) throws MetaException, + NoSuchObjectException { + try { + start(initTable); + if (doUseDirectSql) { + try { + setResult(getSqlResult(this)); + } catch (Exception ex) { + handleDirectSqlError(ex); + } + } + if (!doUseDirectSql) { + setResult(getJdoResult(this)); + } + return commit(); + } catch (NoSuchObjectException ex) { + throw ex; + } catch (MetaException ex) { + throw ex; + } catch (Exception ex) { + LOG.error("", ex); + throw new MetaException(ex.getMessage()); + } finally { + close(); + } + } + + private void start(boolean initTable) throws MetaException, + NoSuchObjectException { + start = doTrace ? System.nanoTime() : 0; + openTransaction(); + if (initTable) { + table = ensureGetTable(dbName, tblName); + } + } + + private boolean setResult(T results) { + this.results = results; + return this.results != null; + } + + private void handleDirectSqlError(Exception ex) throws MetaException, + NoSuchObjectException { + LOG.error("Direct SQL failed" + + (allowJdo ? ", falling back to ORM" : ""), ex); + if (!allowJdo) { + if (ex instanceof MetaException) { + throw (MetaException) ex; + } + throw new MetaException(ex.getMessage()); + } + if (!isInTxn) { + rollbackTransaction(); + start = doTrace ? System.nanoTime() : 0; + openTransaction(); + if (table != null) { + table = ensureGetTable(dbName, tblName); + } + } else { + start = doTrace ? System.nanoTime() : 0; + } + doUseDirectSql = false; + } + + public void disableDirectSql() { + this.doUseDirectSql = false; + } + + private T commit() { + success = commitTransaction(); + if (doTrace) { + LOG.debug(describeResult() + " retrieved using " + + (doUseDirectSql ? "SQL" : "ORM") + " in " + + ((System.nanoTime() - start) / 1000000.0) + "ms"); + } + return results; + } + + private void close() { + if (!success) { + rollbackTransaction(); + } + } + + public Table getTable() { + return table; + } + } + + private abstract class GetListHelper extends GetHelper> { + public GetListHelper(String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(dbName, tblName, allowSql, allowJdo); + } + + @Override + protected String describeResult() { + return results.size() + " entries"; + } + } + + private abstract class GetStatHelper extends GetHelper { + public GetStatHelper(String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(dbName, tblName, allowSql, allowJdo); + } + + @Override + protected String describeResult() { + return "statistics for " + + (results == null ? 
0 : results.getStatsObjSize()) + + " columns"; + } + } + + protected List getPartitionsByFilterInternal(String dbName, + String tblName, String filter, final short maxParts, + boolean allowSql, boolean allowJdo) throws MetaException, + NoSuchObjectException { + final ExpressionTree tree = (filter != null && !filter.isEmpty()) ? getFilterParser(filter).tree + : ExpressionTree.EMPTY_TREE; + + return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + List parts = directSql.getPartitionsViaSqlFilter(ctx + .getTable(), tree, (maxParts < 0) ? null + : (int) maxParts); + if (parts == null) { + // Cannot push down SQL filter. The message has been logged + // internally. + // This is not an error so don't roll back, just go to JDO. + ctx.disableDirectSql(); + } + return parts; + } + + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException, + NoSuchObjectException { + return getPartitionsViaOrmFilter(ctx.getTable(), tree, + maxParts, true); + } + }.run(true); + } + + /** + * Gets the table object for a given table, throws if anything goes wrong. + * + * @param dbName + * Database name. + * @param tblName + * Table name. + * @return Table object. + */ + private MTable ensureGetMTable(String dbName, String tblName) + throws NoSuchObjectException, MetaException { + MTable mtable = getMTable(dbName, tblName); + if (mtable == null) { + throw new NoSuchObjectException( + "Specified database/table does not exist : " + dbName + "." + + tblName); + } + return mtable; + } + + private Table ensureGetTable(String dbName, String tblName) + throws NoSuchObjectException, MetaException { + return convertToTable(ensureGetMTable(dbName, tblName)); + } + + private FilterParser getFilterParser(String filter) throws MetaException { + FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter)); + CommonTokenStream tokens = new CommonTokenStream(lexer); + + FilterParser parser = new FilterParser(tokens); + try { + parser.filter(); + } catch (RecognitionException re) { + throw new MetaException( + "Error parsing partition filter; lexer error: " + + lexer.errorMsg + "; exception " + re); + } + + if (lexer.errorMsg != null) { + throw new MetaException("Error parsing partition filter : " + + lexer.errorMsg); + } + return parser; + } + + /** + * Makes a JDO query filter string. Makes a JDO query filter string for + * tables or partitions. + * + * @param dbName + * Database name. + * @param table + * Table. If null, the query returned is over tables in a + * database. If not null, the query returned is over partitions + * in a table. + * @param filter + * The filter from which JDOQL filter will be made. + * @param params + * Parameters for the filter. Some parameters may be added here. + * @return Resulting filter. + */ + private String makeQueryFilterString(String dbName, MTable mtable, + String filter, Map params) throws MetaException { + ExpressionTree tree = (filter != null && !filter.isEmpty()) ? getFilterParser(filter).tree + : ExpressionTree.EMPTY_TREE; + return makeQueryFilterString(dbName, convertToTable(mtable), tree, + params, true); + } + + /** + * Makes a JDO query filter string for tables or partitions. + * + * @param dbName + * Database name. + * @param table + * Table. If null, the query returned is over tables in a + * database. If not null, the query returned is over partitions + * in a table. + * @param tree + * The expression tree from which JDOQL filter will be made. 
+ * @param params + * Parameters for the filter. Some parameters may be added here. + * @param isValidatedFilter + * Whether the filter was pre-validated for JDOQL pushdown by the + * client; if it was and we fail to create a filter, we will + * throw. + * @return Resulting filter. Can be null if isValidatedFilter is false, and + * there was error. + */ + private String makeQueryFilterString(String dbName, Table table, + ExpressionTree tree, Map params, + boolean isValidatedFilter) throws MetaException { + assert tree != null; + FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); + if (table != null) { + queryBuilder + .append("table.tableName == t1 && table.database.name == t2"); + params.put("t1", table.getTableName()); + params.put("t2", table.getDbName()); + } else { + queryBuilder.append("database.name == dbName"); + params.put("dbName", dbName); + } + + tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder); + if (queryBuilder.hasError()) { + assert !isValidatedFilter; + LOG.info("JDO filter pushdown cannot be used: " + + queryBuilder.getErrorMessage()); + return null; + } + String jdoFilter = queryBuilder.getFilter(); + LOG.debug("jdoFilter = " + jdoFilter); + return jdoFilter; + } + + private String makeParameterDeclarationString(Map params) { + // Create the parameter declaration string + StringBuilder paramDecl = new StringBuilder(); + for (String key : params.keySet()) { + paramDecl.append(", java.lang.String " + key); + } + return paramDecl.toString(); + } + + private String makeParameterDeclarationStringObj(Map params) { + // Create the parameter declaration string + StringBuilder paramDecl = new StringBuilder(); + for (Entry entry : params.entrySet()) { + paramDecl.append(", "); + paramDecl.append(entry.getValue().getClass().getName()); + paramDecl.append(" "); + paramDecl.append(entry.getKey()); + } + return paramDecl.toString(); + } + + @Override + public List listTableNamesByFilter(String dbName, String filter, + short maxTables) throws MetaException { + boolean success = false; + List tableNames = new ArrayList(); + try { + openTransaction(); + LOG.debug("Executing listTableNamesByFilter"); + dbName = dbName.toLowerCase().trim(); + Map params = new HashMap(); + String queryFilterString = makeQueryFilterString(dbName, null, + filter, params); + Query query = pm.newQuery(MTable.class); + query.declareImports("import java.lang.String"); + query.setResult("tableName"); + query.setResultClass(java.lang.String.class); + if (maxTables >= 0) { + query.setRange(0, maxTables); + } + LOG.debug("filter specified is " + filter + "," + + " JDOQL filter is " + queryFilterString); + for (Entry entry : params.entrySet()) { + LOG.debug("key: " + entry.getKey() + " value: " + + entry.getValue() + " class: " + + entry.getValue().getClass().getName()); + } + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setFilter(queryFilterString); + Collection names = (Collection) query.executeWithMap(params); + // have to emulate "distinct", otherwise tables with the same name + // may be returned + Set tableNamesSet = new HashSet(); + for (Iterator i = names.iterator(); i.hasNext();) { + tableNamesSet.add((String) i.next()); + } + tableNames = new ArrayList(tableNamesSet); + LOG.debug("Done executing query for listTableNamesByFilter"); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listTableNamesByFilter"); + + } finally { + if (!success) { + 
rollbackTransaction(); + } + } + return tableNames; + } + + @Override + public List listPartitionNamesByFilter(String dbName, + String tableName, String filter, short maxParts) + throws MetaException { + boolean success = false; + List partNames = new ArrayList(); + try { + openTransaction(); + LOG.debug("Executing listMPartitionNamesByFilter"); + dbName = dbName.toLowerCase(); + tableName = tableName.toLowerCase(); + + MTable mtable = getMTable(dbName, tableName); + if (mtable == null) { + // To be consistent with the behavior of listPartitionNames, if + // the + // table or db does not exist, we return an empty list + return partNames; + } + Map params = new HashMap(); + String queryFilterString = makeQueryFilterString(dbName, mtable, + filter, params); + Query query = pm + .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where " + queryFilterString); + + if (maxParts >= 0) { + // User specified a row limit, set it on the Query + query.setRange(0, maxParts); + } + + LOG.debug("Filter specified is " + filter + "," + + " JDOQL filter is " + queryFilterString); + LOG.debug("Parms is " + params); + + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setOrdering("partitionName ascending"); + query.setResult("partitionName"); + + Collection names = (Collection) query.executeWithMap(params); + partNames = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + partNames.add((String) i.next()); + } + + LOG.debug("Done executing query for listMPartitionNamesByFilter"); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return partNames; + } + + @Override + public void alterTable(String dbname, String name, Table newTable) + throws InvalidObjectException, MetaException { + boolean success = false; + try { + openTransaction(); + name = name.toLowerCase(); + dbname = dbname.toLowerCase(); + MTable newt = convertToMTable(newTable); + if (newt == null) { + throw new InvalidObjectException("new table is invalid"); + } + + MTable oldt = getMTable(dbname, name); + if (oldt == null) { + throw new MetaException("table " + name + " doesn't exist"); + } + + // For now only alter name, owner, paramters, cols, bucketcols are + // allowed + oldt.setTableName(newt.getTableName().toLowerCase()); + oldt.setParameters(newt.getParameters()); + oldt.setOwner(newt.getOwner()); + // Fully copy over the contents of the new SD into the old SD, + // so we don't create an extra SD in the metastore db that has no + // references. 
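As the comment notes, the new storage descriptor's contents are copied into the existing, JDO-managed one rather than persisting a second SD row; the column descriptor is swapped only when the column lists actually differ, and the replaced descriptor then becomes a candidate for deletion. A simplified sketch of that in-place copy, with hypothetical `Sd`/`Cd` stand-ins rather than the real model classes:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

public class CopySdSketch {
    static class Cd { List<String> cols; Cd(List<String> cols) { this.cols = cols; } }
    static class Sd { Cd cd; String location; Sd(Cd cd, String location) { this.cd = cd; this.location = location; } }

    /** Copies newSd onto oldSd in place and returns the displaced CD, if any. */
    static Cd copyInPlace(Sd newSd, Sd oldSd) {
        Cd replaced = null;
        oldSd.location = newSd.location;                      // scalar fields copied over
        if (!Objects.equals(oldSd.cd.cols, newSd.cd.cols)) {  // columns changed?
            replaced = oldSd.cd;                              // remember the old descriptor ...
            oldSd.cd = newSd.cd;                              // ... and point at the new one
        }
        return replaced;                                      // caller drops it if unreferenced
    }

    public static void main(String[] args) {
        Sd oldSd = new Sd(new Cd(Arrays.asList("id")), "/warehouse/t_old");
        Sd newSd = new Sd(new Cd(Arrays.asList("id", "name")), "/warehouse/t");
        Cd orphan = copyInPlace(newSd, oldSd);
        System.out.println(oldSd.location + " " + oldSd.cd.cols + " orphaned CD: " + (orphan != null));
    }
}
```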
+ copyMSD(newt.getSd(), oldt.getSd()); + oldt.setDatabase(newt.getDatabase()); + oldt.setRetention(newt.getRetention()); + oldt.setPartitionKeys(newt.getPartitionKeys()); + oldt.setTableType(newt.getTableType()); + oldt.setLastAccessTime(newt.getLastAccessTime()); + oldt.setViewOriginalText(newt.getViewOriginalText()); + oldt.setViewExpandedText(newt.getViewExpandedText()); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public void alterIndex(String dbname, String baseTblName, String name, + Index newIndex) throws InvalidObjectException, MetaException { + boolean success = false; + try { + openTransaction(); + name = name.toLowerCase(); + baseTblName = baseTblName.toLowerCase(); + dbname = dbname.toLowerCase(); + MIndex newi = convertToMIndex(newIndex); + if (newi == null) { + throw new InvalidObjectException("new index is invalid"); + } + + MIndex oldi = getMIndex(dbname, baseTblName, name); + if (oldi == null) { + throw new MetaException("index " + name + " doesn't exist"); + } + + // For now only alter paramters are allowed + oldi.setParameters(newi.getParameters()); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private void alterPartitionNoTxn(String dbname, String name, + List part_vals, Partition newPart) + throws InvalidObjectException, MetaException { + name = name.toLowerCase(); + dbname = dbname.toLowerCase(); + MPartition oldp = getMPartition(dbname, name, part_vals); + MPartition newp = convertToMPart(newPart, false); + if (oldp == null || newp == null) { + throw new InvalidObjectException("partition does not exist."); + } + oldp.setValues(newp.getValues()); + oldp.setPartitionName(newp.getPartitionName()); + oldp.setParameters(newPart.getParameters()); + if (!TableType.VIRTUAL_VIEW.name().equals( + oldp.getTable().getTableType())) { + copyMSD(newp.getSd(), oldp.getSd()); + } + if (newp.getCreateTime() != oldp.getCreateTime()) { + oldp.setCreateTime(newp.getCreateTime()); + } + if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { + oldp.setLastAccessTime(newp.getLastAccessTime()); + } + } + + @Override + public void alterPartition(String dbname, String name, + List part_vals, Partition newPart) + throws InvalidObjectException, MetaException { + boolean success = false; + Exception e = null; + try { + openTransaction(); + alterPartitionNoTxn(dbname, name, part_vals, newPart); + // commit the changes + success = commitTransaction(); + } catch (Exception exception) { + e = exception; + } finally { + if (!success) { + rollbackTransaction(); + MetaException metaException = new MetaException( + "The transaction for alter partition did not commit successfully."); + if (e != null) { + metaException.initCause(e); + } + throw metaException; + } + } + } + + @Override + public void alterPartitions(String dbname, String name, + List> part_vals, List newParts) + throws InvalidObjectException, MetaException { + boolean success = false; + Exception e = null; + try { + openTransaction(); + Iterator> part_val_itr = part_vals.iterator(); + for (Partition tmpPart : newParts) { + List tmpPartVals = part_val_itr.next(); + alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart); + } + // commit the changes + success = commitTransaction(); + } catch (Exception exception) { + e = exception; + } finally { + if (!success) { + rollbackTransaction(); + MetaException metaException = new MetaException( + "The transaction 
for alter partition did not commit successfully."); + if (e != null) { + metaException.initCause(e); + } + throw metaException; + } + } + } + + private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) { + oldSd.setLocation(newSd.getLocation()); + MColumnDescriptor oldCD = oldSd.getCD(); + // If the columns of the old column descriptor != the columns of the new + // one, + // then change the old storage descriptor's column descriptor. + // Convert the MFieldSchema's to their thrift object counterparts, + // because we maintain + // datastore identity (i.e., identity of the model objects are managed + // by JDO, + // not the application). + if (!(oldSd != null && oldSd.getCD() != null + && oldSd.getCD().getCols() != null && newSd != null + && newSd.getCD() != null && newSd.getCD().getCols() != null && convertToFieldSchemas( + newSd.getCD().getCols()).equals( + convertToFieldSchemas(oldSd.getCD().getCols())))) { + oldSd.setCD(newSd.getCD()); + } + + // If oldCd does not have any more references, then we should delete it + // from the backend db + removeUnusedColumnDescriptor(oldCD); + oldSd.setBucketCols(newSd.getBucketCols()); + oldSd.setCompressed(newSd.isCompressed()); + oldSd.setInputFormat(newSd.getInputFormat()); + oldSd.setOutputFormat(newSd.getOutputFormat()); + oldSd.setNumBuckets(newSd.getNumBuckets()); + oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName()); + oldSd.getSerDeInfo().setSerializationLib( + newSd.getSerDeInfo().getSerializationLib()); + oldSd.getSerDeInfo() + .setParameters(newSd.getSerDeInfo().getParameters()); + oldSd.setSkewedColNames(newSd.getSkewedColNames()); + oldSd.setSkewedColValues(newSd.getSkewedColValues()); + oldSd.setSkewedColValueLocationMaps(newSd + .getSkewedColValueLocationMaps()); + oldSd.setSortCols(newSd.getSortCols()); + oldSd.setParameters(newSd.getParameters()); + oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories()); + } + + /** + * Checks if a column descriptor has any remaining references by storage + * descriptors in the db. If it does not, then delete the CD. If it does, + * then do nothing. + * + * @param oldCD + * the column descriptor to delete if it is no longer referenced + * anywhere + */ + private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) { + if (oldCD == null) { + return; + } + + boolean success = false; + try { + openTransaction(); + LOG.debug("execute removeUnusedColumnDescriptor"); + List referencedSDs = listStorageDescriptorsWithCD( + oldCD, 1); + // if no other SD references this CD, we can throw it out. + if (referencedSDs != null && referencedSDs.isEmpty()) { + pm.retrieve(oldCD); + pm.deletePersistent(oldCD); + } + success = commitTransaction(); + LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + /** + * Called right before an action that would drop a storage descriptor. This + * function makes the SD's reference to a CD null, and then deletes the CD + * if it no longer is referenced in the table. + * + * @param msd + * the storage descriptor to drop + */ + private void preDropStorageDescriptor(MStorageDescriptor msd) { + if (msd == null || msd.getCD() == null) { + return; + } + + MColumnDescriptor mcd = msd.getCD(); + // Because there is a 1-N relationship between CDs and SDs, + // we must set the SD's CD to null first before dropping the storage + // descriptor + // to satisfy foriegn key constraints. 
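The clean-up described here deletes a column descriptor only once no storage descriptor references it any more; the check is a query for referencing SDs with a row limit of one. An in-memory sketch of that existence check follows, with a plain list standing in for the JDO query.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

public class UnusedCdSketch {
    static class Sd {
        final String name; final String cdId;
        Sd(String name, String cdId) { this.name = name; this.cdId = cdId; }
    }

    /** Equivalent of the range-limited query: is there any SD still pointing at the CD? */
    static boolean isUnreferenced(List<Sd> allSds, String cdId) {
        return allSds.stream().noneMatch(sd -> Objects.equals(sd.cdId, cdId));
    }

    public static void main(String[] args) {
        List<Sd> sds = Arrays.asList(new Sd("tbl_sd", "cd-1"), new Sd("part_sd", "cd-1"));
        System.out.println(isUnreferenced(sds, "cd-1")); // false -> another SD still uses it, keep it
        System.out.println(isUnreferenced(sds, "cd-9")); // true  -> no references left, safe to delete
    }
}
```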
+ msd.setCD(null); + removeUnusedColumnDescriptor(mcd); + } + + /** + * Get a list of storage descriptors that reference a particular Column + * Descriptor + * + * @param oldCD + * the column descriptor to get storage descriptors for + * @param maxSDs + * the maximum number of SDs to return + * @return a list of storage descriptors + */ + private List listStorageDescriptorsWithCD( + MColumnDescriptor oldCD, long maxSDs) { + boolean success = false; + List sds = null; + try { + openTransaction(); + LOG.debug("Executing listStorageDescriptorsWithCD"); + Query query = pm.newQuery(MStorageDescriptor.class, + "this.cd == inCD"); + query.declareParameters("MColumnDescriptor inCD"); + if (maxSDs >= 0) { + // User specified a row limit, set it on the Query + query.setRange(0, maxSDs); + } + sds = (List) query.execute(oldCD); + LOG.debug("Done executing query for listStorageDescriptorsWithCD"); + pm.retrieveAll(sds); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listStorageDescriptorsWithCD"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return sds; + } + + @Override + public boolean addIndex(Index index) throws InvalidObjectException, + MetaException { + boolean commited = false; + try { + openTransaction(); + MIndex idx = convertToMIndex(index); + pm.makePersistent(idx); + commited = commitTransaction(); + return true; + } finally { + if (!commited) { + rollbackTransaction(); + return false; + } + } + } + + private MIndex convertToMIndex(Index index) throws InvalidObjectException, + MetaException { + + StorageDescriptor sd = index.getSd(); + if (sd == null) { + throw new InvalidObjectException( + "Storage descriptor is not defined for index."); + } + + MStorageDescriptor msd = this.convertToMStorageDescriptor(sd); + MTable origTable = getMTable(index.getDbName(), + index.getOrigTableName()); + if (origTable == null) { + throw new InvalidObjectException( + "Original table does not exist for the given index."); + } + + MTable indexTable = getMTable(index.getDbName(), + index.getIndexTableName()); + if (indexTable == null) { + throw new InvalidObjectException( + "Underlying index table does not exist for the given index."); + } + + return new MIndex(index.getIndexName().toLowerCase(), origTable, + index.getCreateTime(), index.getLastAccessTime(), + index.getParameters(), indexTable, msd, + index.getIndexHandlerClass(), index.isDeferredRebuild()); + } + + @Override + public boolean dropIndex(String dbName, String origTableName, + String indexName) throws MetaException { + boolean success = false; + try { + openTransaction(); + MIndex index = getMIndex(dbName, origTableName, indexName); + if (index != null) { + pm.deletePersistent(index); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + private MIndex getMIndex(String dbName, String originalTblName, + String indexName) throws MetaException { + MIndex midx = null; + boolean commited = false; + try { + openTransaction(); + dbName = dbName.toLowerCase().trim(); + originalTblName = originalTblName.toLowerCase().trim(); + MTable mtbl = getMTable(dbName, originalTblName); + if (mtbl == null) { + commited = commitTransaction(); + return null; + } + + Query query = pm + .newQuery(MIndex.class, + "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + midx = (MIndex) 
query.execute(originalTblName, dbName, + indexName.toLowerCase()); + pm.retrieve(midx); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return midx; + } + + @Override + public Index getIndex(String dbName, String origTableName, String indexName) + throws MetaException { + openTransaction(); + MIndex mIndex = this.getMIndex(dbName, origTableName, indexName); + Index ret = convertToIndex(mIndex); + commitTransaction(); + return ret; + } + + private Index convertToIndex(MIndex mIndex) throws MetaException { + if (mIndex == null) { + return null; + } + + return new Index(mIndex.getIndexName(), mIndex.getIndexHandlerClass(), + mIndex.getOrigTable().getDatabase().getName(), mIndex + .getOrigTable().getTableName(), mIndex.getCreateTime(), + mIndex.getLastAccessTime(), mIndex.getIndexTable() + .getTableName(), this.convertToStorageDescriptor(mIndex + .getSd()), mIndex.getParameters(), + mIndex.getDeferredRebuild()); + + } + + @Override + public List getIndexes(String dbName, String origTableName, int max) + throws MetaException { + boolean success = false; + try { + openTransaction(); + List mIndexList = listMIndexes(dbName, origTableName, max); + List indexes = new ArrayList(mIndexList.size()); + for (MIndex midx : mIndexList) { + indexes.add(this.convertToIndex(midx)); + } + success = commitTransaction(); + return indexes; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List listMIndexes(String dbName, String origTableName, + int max) { + boolean success = false; + List mindexes = null; + try { + openTransaction(); + LOG.debug("Executing listMIndexes"); + dbName = dbName.toLowerCase().trim(); + origTableName = origTableName.toLowerCase().trim(); + Query query = pm + .newQuery(MIndex.class, + "origTable.tableName == t1 && origTable.database.name == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mindexes = (List) query.execute(origTableName, dbName); + LOG.debug("Done executing query for listMIndexes"); + pm.retrieveAll(mindexes); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMIndexes"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mindexes; + } + + @Override + public List listIndexNames(String dbName, String origTableName, + short max) throws MetaException { + List pns = new ArrayList(); + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listIndexNames"); + dbName = dbName.toLowerCase().trim(); + origTableName = origTableName.toLowerCase().trim(); + Query q = pm + .newQuery("select indexName from org.apache.hadoop.hive.metastore.model.MIndex " + + "where origTable.database.name == t1 && origTable.tableName == t2 " + + "order by indexName asc"); + q.declareParameters("java.lang.String t1, java.lang.String t2"); + q.setResult("indexName"); + Collection names = (Collection) q.execute(dbName, origTableName); + for (Iterator i = names.iterator(); i.hasNext();) { + pns.add((String) i.next()); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return pns; + } + + @Override + public boolean addRole(String roleName, String ownerName) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean success = false; + boolean commited = false; + try { + openTransaction(); + MRole nameCheck = this.getMRole(roleName); + if (nameCheck != null) { + throw new InvalidObjectException("Role " + roleName + + " already exists."); + } + int 
now = (int) (System.currentTimeMillis() / 1000); + MRole mRole = new MRole(roleName, now, ownerName); + pm.makePersistent(mRole); + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public boolean grantRole(Role role, String userName, + PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) + throws MetaException, NoSuchObjectException, InvalidObjectException { + boolean success = false; + boolean commited = false; + try { + openTransaction(); + MRoleMap roleMap = null; + try { + roleMap = this.getMSecurityUserRoleMap(userName, principalType, + role.getRoleName()); + } catch (Exception e) { + } + if (roleMap != null) { + throw new InvalidObjectException("Principal " + userName + + " already has the role " + role.getRoleName()); + } + if (principalType == PrincipalType.ROLE) { + validateRole(userName); + } + MRole mRole = getMRole(role.getRoleName()); + long now = System.currentTimeMillis() / 1000; + MRoleMap roleMember = new MRoleMap(userName, + principalType.toString(), mRole, (int) now, grantor, + grantorType.toString(), grantOption); + pm.makePersistent(roleMember); + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + /** + * Verify that role with given name exists, if not throw exception + * + * @param roleName + * @throws NoSuchObjectException + */ + private void validateRole(String roleName) throws NoSuchObjectException { + // if grantee is a role, check if it exists + MRole granteeRole = getMRole(roleName); + if (granteeRole == null) { + throw new NoSuchObjectException("Role " + roleName + + " does not exist"); + } + } + + @Override + public boolean revokeRole(Role role, String userName, + PrincipalType principalType, boolean grantOption) + throws MetaException, NoSuchObjectException { + boolean success = false; + try { + openTransaction(); + MRoleMap roleMember = getMSecurityUserRoleMap(userName, + principalType, role.getRoleName()); + if (grantOption) { + // Revoke with grant option - only remove the grant option but + // keep the role. + if (roleMember.getGrantOption()) { + roleMember.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with role " + + role.getRoleName()); + } + } else { + // No grant option in revoke, remove the whole role. 
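revokeRole above distinguishes revoking only the grant option from revoking the membership itself. A small sketch of that branch, using hypothetical RoleGrantSketch names purely for illustration:

import java.util.List;

final class RoleGrantSketch {
  final String principal;
  boolean grantOption;

  RoleGrantSketch(String principal, boolean grantOption) {
    this.principal = principal;
    this.grantOption = grantOption;
  }

  // Mirrors the branch above: revoking "with grant option" only clears the flag,
  // while a plain revoke removes the membership entry entirely.
  static void revoke(List<RoleGrantSketch> grants, RoleGrantSketch member, boolean grantOptionOnly) {
    if (grantOptionOnly) {
      if (!member.grantOption) {
        throw new IllegalStateException(member.principal + " does not have the grant option");
      }
      member.grantOption = false;
    } else {
      grants.remove(member);
    }
  }
}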
+ pm.deletePersistent(roleMember); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + private MRoleMap getMSecurityUserRoleMap(String userName, + PrincipalType principalType, String roleName) { + MRoleMap mRoleMember = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm + .newQuery(MRoleMap.class, + "principalName == t1 && principalType == t2 && role.roleName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + mRoleMember = (MRoleMap) query.executeWithArray(userName, + principalType.toString(), roleName); + pm.retrieve(mRoleMember); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mRoleMember; + } + + @Override + public boolean removeRole(String roleName) throws MetaException, + NoSuchObjectException { + boolean success = false; + try { + openTransaction(); + MRole mRol = getMRole(roleName); + pm.retrieve(mRol); + if (mRol != null) { + // first remove all the membership, the membership that this + // role has + // been granted + List roleMap = listRoleMembers(mRol.getRoleName()); + if (roleMap.size() > 0) { + pm.deletePersistentAll(roleMap); + } + List roleMember = listMSecurityPrincipalMembershipRole( + mRol.getRoleName(), PrincipalType.ROLE); + if (roleMember.size() > 0) { + pm.deletePersistentAll(roleMember); + } + // then remove all the grants + List userGrants = listPrincipalGlobalGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (userGrants.size() > 0) { + pm.deletePersistentAll(userGrants); + } + List dbGrants = listPrincipalAllDBGrant( + mRol.getRoleName(), PrincipalType.ROLE); + if (dbGrants.size() > 0) { + pm.deletePersistentAll(dbGrants); + } + List tabPartGrants = listPrincipalAllTableGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (tabPartGrants.size() > 0) { + pm.deletePersistentAll(tabPartGrants); + } + List partGrants = listPrincipalAllPartitionGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (partGrants.size() > 0) { + pm.deletePersistentAll(partGrants); + } + List tblColumnGrants = listPrincipalAllTableColumnGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (tblColumnGrants.size() > 0) { + pm.deletePersistentAll(tblColumnGrants); + } + List partColumnGrants = listPrincipalAllPartitionColumnGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (partColumnGrants.size() > 0) { + pm.deletePersistentAll(partColumnGrants); + } + // finally remove the role + pm.deletePersistent(mRol); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + /** + * Get all the roles in the role hierarchy that this user and groupNames + * belongs to + * + * @param userName + * @param groupNames + * @return + */ + private Set listAllRolesInHierarchy(String userName, + List groupNames) { + List ret = new ArrayList(); + if (userName != null) { + ret.addAll(listRoles(userName, PrincipalType.USER)); + } + if (groupNames != null) { + for (String groupName : groupNames) { + ret.addAll(listRoles(groupName, PrincipalType.GROUP)); + } + } + // get names of these roles and its ancestors + Set roleNames = new HashSet(); + getAllRoleAncestors(roleNames, ret); + return roleNames; + } + + /** + * Add role names of parentRoles and its parents to processedRoles + * + * @param processedRoleNames + * @param parentRoles + */ + private void getAllRoleAncestors(Set 
processedRoleNames, + List parentRoles) { + for (MRoleMap parentRole : parentRoles) { + String parentRoleName = parentRole.getRole().getRoleName(); + if (!processedRoleNames.contains(parentRoleName)) { + // unprocessed role: get its parents, add it to processed, and + // call this + // function recursively + List nextParentRoles = listRoles(parentRoleName, + PrincipalType.ROLE); + processedRoleNames.add(parentRoleName); + getAllRoleAncestors(processedRoleNames, nextParentRoles); + } + } + } + + @SuppressWarnings("unchecked") + @Override + public List listRoles(String principalName, + PrincipalType principalType) { + boolean success = false; + List mRoleMember = null; + try { + openTransaction(); + LOG.debug("Executing listRoles"); + Query query = pm.newQuery(MRoleMap.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.setUnique(false); + mRoleMember = (List) query.executeWithArray( + principalName, principalType.toString()); + LOG.debug("Done executing query for listMSecurityUserRoleMap"); + pm.retrieveAll(mRoleMember); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityUserRoleMap"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + + if (principalType == PrincipalType.USER) { + // All users belong to public role implicitly, add that role + if (mRoleMember == null) { + mRoleMember = new ArrayList(); + } else { + mRoleMember = new ArrayList(mRoleMember); + } + MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, + HiveMetaStore.PUBLIC); + mRoleMember.add(new MRoleMap(principalName, principalType + .toString(), publicRole, 0, null, null, false)); + } + return mRoleMember; + + } + + @SuppressWarnings("unchecked") + private List listMSecurityPrincipalMembershipRole( + final String roleName, final PrincipalType principalType) { + boolean success = false; + List mRoleMemebership = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalMembershipRole"); + Query query = pm.newQuery(MRoleMap.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mRoleMemebership = (List) query.execute(roleName, + principalType.toString()); + LOG.debug("Done executing query for listMSecurityPrincipalMembershipRole"); + pm.retrieveAll(mRoleMemebership); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mRoleMemebership; + } + + @Override + public Role getRole(String roleName) throws NoSuchObjectException { + MRole mRole = this.getMRole(roleName); + if (mRole == null) { + throw new NoSuchObjectException(roleName + + " role can not be found."); + } + Role ret = new Role(mRole.getRoleName(), mRole.getCreateTime(), + mRole.getOwnerName()); + return ret; + } + + private MRole getMRole(String roleName) { + MRole mrole = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MRole.class, "roleName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(true); + mrole = (MRole) query.execute(roleName); + pm.retrieve(mrole); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mrole; + } + + @Override + public List listRoleNames() { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listAllRoleNames"); + Query 
query = pm + .newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole"); + query.setResult("roleName"); + Collection names = (Collection) query.execute(); + List roleNames = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + roleNames.add((String) i.next()); + } + success = commitTransaction(); + return roleNames; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List groupNames) throws InvalidObjectException, + MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + List user = this.listPrincipalGlobalGrants( + userName, PrincipalType.USER); + if (user.size() > 0) { + Map> userPriv = new HashMap>(); + List grantInfos = new ArrayList( + user.size()); + for (int i = 0; i < user.size(); i++) { + MGlobalPrivilege item = user.get(i); + grantInfos.add(new PrivilegeGrantInfo(item + .getPrivilege(), item.getCreateTime(), item + .getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + userPriv.put(userName, grantInfos); + ret.setUserPrivileges(userPriv); + } + } + if (groupNames != null && groupNames.size() > 0) { + Map> groupPriv = new HashMap>(); + for (String groupName : groupNames) { + List group = this + .listPrincipalGlobalGrants(groupName, + PrincipalType.GROUP); + if (group.size() > 0) { + List grantInfos = new ArrayList( + group.size()); + for (int i = 0; i < group.size(); i++) { + MGlobalPrivilege item = group.get(i); + grantInfos.add(new PrivilegeGrantInfo(item + .getPrivilege(), item.getCreateTime(), item + .getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + groupPriv.put(groupName, grantInfos); + } + } + ret.setGroupPrivileges(groupPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + public List getDBPrivilege(String dbName, + String principalName, PrincipalType principalType) + throws InvalidObjectException, MetaException { + dbName = dbName.toLowerCase().trim(); + + if (principalName != null) { + List userNameDbPriv = this.listPrincipalDBGrants( + principalName, principalType, dbName); + if (userNameDbPriv != null && userNameDbPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameDbPriv.size()); + for (int i = 0; i < userNameDbPriv.size(); i++) { + MDBPrivilege item = userNameDbPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), + item.getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), + item.getGrantOption())); + } + return grantInfos; + } + } + return new ArrayList(0); + } + + @Override + public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, + String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commited = false; + dbName = dbName.toLowerCase().trim(); + + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map> dbUserPriv = new HashMap>(); + dbUserPriv.put(userName, + getDBPrivilege(dbName, userName, PrincipalType.USER)); + ret.setUserPrivileges(dbUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> dbGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + dbGroupPriv.put( + groupName, + getDBPrivilege(dbName, groupName, + PrincipalType.GROUP)); + } + 
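listAllRolesInHierarchy and getAllRoleAncestors above compute the transitive closure of role membership for a user and its groups. A compact, iterative sketch of the same traversal; the names and the parents map are hypothetical and not part of the patch:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class RoleClosureSketch {
  // parents maps a role name to the roles it has been granted (its parent roles).
  static Set<String> allRoles(List<String> directRoles, Map<String, List<String>> parents) {
    Set<String> seen = new HashSet<>();
    Deque<String> pending = new ArrayDeque<>(directRoles);
    while (!pending.isEmpty()) {
      String role = pending.pop();
      if (seen.add(role)) {                              // only expand unprocessed roles
        pending.addAll(parents.getOrDefault(role, List.of()));
      }
    }
    return seen;
  }
}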
ret.setGroupPrivileges(dbGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, + groupNames); + if (roleNames != null && roleNames.size() > 0) { + Map> dbRolePriv = new HashMap>(); + for (String roleName : roleNames) { + dbRolePriv + .put(roleName, + getDBPrivilege(dbName, roleName, + PrincipalType.ROLE)); + } + ret.setRolePrivileges(dbRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + String tableName, String partition, String userName, + List groupNames) throws InvalidObjectException, + MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + try { + openTransaction(); + if (userName != null) { + Map> partUserPriv = new HashMap>(); + partUserPriv.put( + userName, + getPartitionPrivilege(dbName, tableName, partition, + userName, PrincipalType.USER)); + ret.setUserPrivileges(partUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> partGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + partGroupPriv.put( + groupName, + getPartitionPrivilege(dbName, tableName, partition, + groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(partGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, + groupNames); + if (roleNames != null && roleNames.size() > 0) { + Map> partRolePriv = new HashMap>(); + for (String roleName : roleNames) { + partRolePriv.put( + roleName, + getPartitionPrivilege(dbName, tableName, partition, + roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(partRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + String tableName, String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + try { + openTransaction(); + if (userName != null) { + Map> tableUserPriv = new HashMap>(); + tableUserPriv.put( + userName, + getTablePrivilege(dbName, tableName, userName, + PrincipalType.USER)); + ret.setUserPrivileges(tableUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> tableGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + tableGroupPriv.put( + groupName, + getTablePrivilege(dbName, tableName, groupName, + PrincipalType.GROUP)); + } + ret.setGroupPrivileges(tableGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, + groupNames); + if (roleNames != null && roleNames.size() > 0) { + Map> tableRolePriv = new HashMap>(); + for (String roleName : roleNames) { + tableRolePriv.put( + roleName, + getTablePrivilege(dbName, tableName, roleName, + PrincipalType.ROLE)); + } + ret.setRolePrivileges(tableRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + String tableName, String partitionName, String columnName, + String userName, List groupNames) + throws InvalidObjectException, MetaException { + tableName = tableName.toLowerCase().trim(); + dbName 
= dbName.toLowerCase().trim(); + columnName = columnName.toLowerCase().trim(); + + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map> columnUserPriv = new HashMap>(); + columnUserPriv.put( + userName, + getColumnPrivilege(dbName, tableName, columnName, + partitionName, userName, PrincipalType.USER)); + ret.setUserPrivileges(columnUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> columnGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + columnGroupPriv.put( + groupName, + getColumnPrivilege(dbName, tableName, columnName, + partitionName, groupName, + PrincipalType.GROUP)); + } + ret.setGroupPrivileges(columnGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, + groupNames); + if (roleNames != null && roleNames.size() > 0) { + Map> columnRolePriv = new HashMap>(); + for (String roleName : roleNames) { + columnRolePriv + .put(roleName, + getColumnPrivilege(dbName, tableName, + columnName, partitionName, + roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(columnRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + private List getPartitionPrivilege(String dbName, + String tableName, String partName, String principalName, + PrincipalType principalType) { + + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + if (principalName != null) { + List userNameTabPartPriv = this + .listPrincipalPartitionGrants(principalName, principalType, + dbName, tableName, partName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MPartitionPrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), + item.getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), + item.getGrantOption())); + + } + return grantInfos; + } + } + return new ArrayList(0); + } + + private PrincipalType getPrincipalTypeFromStr(String str) { + return str == null ? 
null : PrincipalType.valueOf(str); + } + + private List getTablePrivilege(String dbName, + String tableName, String principalName, PrincipalType principalType) { + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + if (principalName != null) { + List userNameTabPartPriv = this + .listAllTableGrants(principalName, principalType, dbName, + tableName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MTablePrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), + item.getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), + item.getGrantOption())); + } + return grantInfos; + } + } + return new ArrayList(0); + } + + private List getColumnPrivilege(String dbName, + String tableName, String columnName, String partitionName, + String principalName, PrincipalType principalType) { + + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + columnName = columnName.toLowerCase().trim(); + + if (partitionName == null) { + List userNameColumnPriv = this + .listPrincipalTableColumnGrants(principalName, + principalType, dbName, tableName, columnName); + if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameColumnPriv.size()); + for (int i = 0; i < userNameColumnPriv.size(); i++) { + MTableColumnPrivilege item = userNameColumnPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), + item.getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), + item.getGrantOption())); + } + return grantInfos; + } + } else { + List userNameColumnPriv = this + .listPrincipalPartitionColumnGrants(principalName, + principalType, dbName, tableName, partitionName, + columnName); + if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameColumnPriv.size()); + for (int i = 0; i < userNameColumnPriv.size(); i++) { + MPartitionColumnPrivilege item = userNameColumnPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), + item.getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), + item.getGrantOption())); + } + return grantInfos; + } + } + return new ArrayList(0); + } + + @Override + public boolean grantPrivileges(PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean committed = false; + int now = (int) (System.currentTimeMillis() / 1000); + try { + openTransaction(); + List persistentObjs = new ArrayList(); + + List privilegeList = privileges + .getPrivileges(); + + if (privilegeList != null && privilegeList.size() > 0) { + Iterator privIter = privilegeList + .iterator(); + Set privSet = new HashSet(); + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + String grantor = privDef.getGrantInfo().getGrantor(); + String grantorType = privDef.getGrantInfo() + .getGrantorType().toString(); + boolean grantOption = privDef.getGrantInfo() + .isGrantOption(); + privSet.clear(); + + if (principalType 
== PrincipalType.ROLE) { + validateRole(userName); + } + + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List globalPrivs = this + .listPrincipalGlobalGrants(userName, + principalType); + if (globalPrivs != null) { + for (MGlobalPrivilege priv : globalPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted by " + grantor); + } + MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege( + userName, principalType.toString(), + privilege, now, grantor, grantorType, + grantOption); + persistentObjs.add(mGlobalPrivs); + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + if (dbObj != null) { + List dbPrivs = this + .listPrincipalDBGrants(userName, + principalType, + hiveObject.getDbName()); + if (dbPrivs != null) { + for (MDBPrivilege priv : dbPrivs) { + if (priv.getGrantor().equalsIgnoreCase( + grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException( + privilege + + " is already granted on database " + + hiveObject.getDbName() + + " by " + grantor); + } + MDBPrivilege mDb = new MDBPrivilege(userName, + principalType.toString(), dbObj, + privilege, now, grantor, grantorType, + grantOption); + persistentObjs.add(mDb); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + MTable tblObj = getMTable(hiveObject.getDbName(), + hiveObject.getObjectName()); + if (tblObj != null) { + List tablePrivs = this + .listAllTableGrants(userName, + principalType, + hiveObject.getDbName(), + hiveObject.getObjectName()); + if (tablePrivs != null) { + for (MTablePrivilege priv : tablePrivs) { + if (priv.getGrantor() != null + && priv.getGrantor() + .equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on table [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + + "] by " + grantor); + } + MTablePrivilege mTab = new MTablePrivilege( + userName, principalType.toString(), + tblObj, privilege, now, grantor, + grantorType, grantOption); + persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + MPartition partObj = this.getMPartition( + hiveObject.getDbName(), + hiveObject.getObjectName(), + hiveObject.getPartValues()); + String partName = null; + if (partObj != null) { + partName = partObj.getPartitionName(); + List partPrivs = this + .listPrincipalPartitionGrants(userName, + principalType, + hiveObject.getDbName(), + hiveObject.getObjectName(), + partObj.getPartitionName()); + if (partPrivs != null) { + for (MPartitionPrivilege priv : partPrivs) { + if (priv.getGrantor().equalsIgnoreCase( + grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException( + privilege + + " is already granted on partition [" + + hiveObject.getDbName() + + "," + + hiveObject + .getObjectName() + + "," + partName + "] by " + + grantor); + } + MPartitionPrivilege mTab = new MPartitionPrivilege( + userName, principalType.toString(), + partObj, privilege, now, grantor, + grantorType, grantOption); + 
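grantPrivileges above splits the comma-separated privilege string and rejects any privilege the same grantor has already granted on the object. A stripped-down sketch of that duplicate check, with hypothetical names:

import java.util.Set;

final class GrantCheckSketch {
  // privilegeStr is a comma-separated list such as "SELECT,UPDATE";
  // alreadyGranted holds the privileges this grantor granted earlier on the same object.
  static void checkNotAlreadyGranted(String privilegeStr, Set<String> alreadyGranted) {
    for (String privilege : privilegeStr.split(",")) {
      if (alreadyGranted.contains(privilege)) {
        throw new IllegalStateException(privilege + " is already granted");
      }
    }
  }
}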
persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + MTable tblObj = getMTable(hiveObject.getDbName(), + hiveObject.getObjectName()); + if (tblObj != null) { + if (hiveObject.getPartValues() != null) { + MPartition partObj = null; + List colPrivs = null; + partObj = this.getMPartition( + hiveObject.getDbName(), + hiveObject.getObjectName(), + hiveObject.getPartValues()); + if (partObj == null) { + continue; + } + colPrivs = this + .listPrincipalPartitionColumnGrants( + userName, principalType, + hiveObject.getDbName(), + hiveObject.getObjectName(), + partObj.getPartitionName(), + hiveObject.getColumnName()); + + if (colPrivs != null) { + for (MPartitionColumnPrivilege priv : colPrivs) { + if (priv.getGrantor().equalsIgnoreCase( + grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException( + privilege + + " is already granted on column " + + hiveObject + .getColumnName() + + " [" + + hiveObject + .getDbName() + + "," + + hiveObject + .getObjectName() + + "," + + partObj + .getPartitionName() + + "] by " + grantor); + } + MPartitionColumnPrivilege mCol = new MPartitionColumnPrivilege( + userName, principalType.toString(), + partObj, + hiveObject.getColumnName(), + privilege, now, grantor, + grantorType, grantOption); + persistentObjs.add(mCol); + } + + } else { + List colPrivs = null; + colPrivs = this.listPrincipalTableColumnGrants( + userName, principalType, + hiveObject.getDbName(), + hiveObject.getObjectName(), + hiveObject.getColumnName()); + + if (colPrivs != null) { + for (MTableColumnPrivilege priv : colPrivs) { + if (priv.getGrantor().equalsIgnoreCase( + grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException( + privilege + + " is already granted on column " + + hiveObject + .getColumnName() + + " [" + + hiveObject + .getDbName() + + "," + + hiveObject + .getObjectName() + + "] by " + grantor); + } + MTableColumnPrivilege mCol = new MTableColumnPrivilege( + userName, principalType.toString(), + tblObj, hiveObject.getColumnName(), + privilege, now, grantor, + grantorType, grantOption); + persistentObjs.add(mCol); + } + } + } + } + } + } + if (persistentObjs.size() > 0) { + pm.makePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + @Override + public boolean revokePrivileges(PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean committed = false; + try { + openTransaction(); + List persistentObjs = new ArrayList(); + + List privilegeList = privileges + .getPrivileges(); + + if (privilegeList != null && privilegeList.size() > 0) { + Iterator privIter = privilegeList + .iterator(); + + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + if (privilegeStr == null || privilegeStr.trim().equals("")) { + continue; + } + String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List mSecUser = this + .listPrincipalGlobalGrants(userName, + principalType); + boolean 
found = false; + if (mSecUser != null) { + for (String privilege : privs) { + for (MGlobalPrivilege userGrant : mSecUser) { + String userGrantPrivs = userGrant + .getPrivilege(); + if (privilege.equals(userGrantPrivs)) { + found = true; + persistentObjs.add(userGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No user grant found for privileges " + + privilege); + } + } + } + + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + if (dbObj != null) { + String db = hiveObject.getDbName(); + boolean found = false; + List dbGrants = this + .listPrincipalDBGrants(userName, + principalType, db); + for (String privilege : privs) { + for (MDBPrivilege dbGrant : dbGrants) { + String dbGrantPriv = dbGrant.getPrivilege(); + if (privilege.equals(dbGrantPriv)) { + found = true; + persistentObjs.add(dbGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No database grant found for privileges " + + privilege + + " on database " + db); + } + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + boolean found = false; + List tableGrants = this + .listAllTableGrants(userName, principalType, + hiveObject.getDbName(), + hiveObject.getObjectName()); + for (String privilege : privs) { + for (MTablePrivilege tabGrant : tableGrants) { + String tableGrantPriv = tabGrant.getPrivilege(); + if (privilege.equalsIgnoreCase(tableGrantPriv)) { + found = true; + persistentObjs.add(tabGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + + privilege + ") found " + " on table " + + hiveObject.getObjectName() + + ", database is " + + hiveObject.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + + boolean found = false; + Table tabObj = this.getTable(hiveObject.getDbName(), + hiveObject.getObjectName()); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName( + tabObj.getPartitionKeys(), + hiveObject.getPartValues()); + } + List partitionGrants = this + .listPrincipalPartitionGrants(userName, + principalType, hiveObject.getDbName(), + hiveObject.getObjectName(), partName); + for (String privilege : privs) { + for (MPartitionPrivilege partGrant : partitionGrants) { + String partPriv = partGrant.getPrivilege(); + if (partPriv.equalsIgnoreCase(privilege)) { + found = true; + persistentObjs.add(partGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + + privilege + ") found " + " on table " + + tabObj.getTableName() + + ", partition is " + partName + + ", database is " + tabObj.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + + Table tabObj = this.getTable(hiveObject.getDbName(), + hiveObject.getObjectName()); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName( + tabObj.getPartitionKeys(), + hiveObject.getPartValues()); + } + + if (partName != null) { + List mSecCol = listPrincipalPartitionColumnGrants( + userName, principalType, + hiveObject.getDbName(), + hiveObject.getObjectName(), partName, + hiveObject.getColumnName()); + boolean found = false; + if (mSecCol != null) { + for (String privilege : privs) { + for (MPartitionColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + persistentObjs.add(col); + break; + } + } + if (!found) { + 
throw new InvalidObjectException( + "No grant (" + + privilege + + ") found " + + " on table " + + tabObj.getTableName() + + ", partition is " + + partName + + ", column name = " + + hiveObject + .getColumnName() + + ", database is " + + tabObj.getDbName()); + } + } + } + } else { + List mSecCol = listPrincipalTableColumnGrants( + userName, principalType, + hiveObject.getDbName(), + hiveObject.getObjectName(), + hiveObject.getColumnName()); + boolean found = false; + if (mSecCol != null) { + for (String privilege : privs) { + for (MTableColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + persistentObjs.add(col); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No grant (" + + privilege + + ") found " + + " on table " + + tabObj.getTableName() + + ", column name = " + + hiveObject + .getColumnName() + + ", database is " + + tabObj.getDbName()); + } + } + } + } + + } + } + } + + if (persistentObjs.size() > 0) { + pm.deletePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + @SuppressWarnings("unchecked") + @Override + public List listRoleMembers(String roleName) { + boolean success = false; + List mRoleMemeberList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityUserRoleMember"); + Query query = pm.newQuery(MRoleMap.class, "role.roleName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(false); + mRoleMemeberList = (List) query.execute(roleName); + LOG.debug("Done executing query for listMSecurityUserRoleMember"); + pm.retrieveAll(mRoleMemeberList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityUserRoleMember"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mRoleMemeberList; + } + + @SuppressWarnings("unchecked") + @Override + public List listPrincipalGlobalGrants( + String principalName, PrincipalType principalType) { + boolean commited = false; + List userNameDbPriv = null; + try { + openTransaction(); + if (principalName != null) { + Query query = pm.newQuery(MGlobalPrivilege.class, + "principalName == t1 && principalType == t2 "); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + userNameDbPriv = (List) query + .executeWithArray(principalName, + principalType.toString()); + pm.retrieveAll(userNameDbPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return userNameDbPriv; + } + + @Override + public List listGlobalGrantsAll() { + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MGlobalPrivilege.class); + List userNameDbPriv = (List) query + .execute(); + pm.retrieveAll(userNameDbPriv); + commited = commitTransaction(); + return convertGlobal(userNameDbPriv); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + } + + private List convertGlobal(List privs) { + List result = new ArrayList(); + for (MGlobalPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.GLOBAL, + null, null, null, null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), 
priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + @Override + public List listPrincipalDBGrants(String principalName, + PrincipalType principalType, String dbName) { + boolean success = false; + List mSecurityDBList = null; + dbName = dbName.toLowerCase().trim(); + + try { + openTransaction(); + LOG.debug("Executing listPrincipalDBGrants"); + Query query = pm + .newQuery(MDBPrivilege.class, + "principalName == t1 && principalType == t2 && database.name == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityDBList = (List) query.executeWithArray( + principalName, principalType.toString(), dbName); + LOG.debug("Done executing query for listPrincipalDBGrants"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalDBGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @Override + public List listPrincipalDBGrantsAll( + String principalName, PrincipalType principalType) { + return convertDB(listPrincipalAllDBGrant(principalName, principalType)); + } + + @Override + public List listDBGrantsAll(String dbName) { + return convertDB(listDatabaseGrants(dbName)); + } + + private List convertDB(List privs) { + List result = new ArrayList(); + for (MDBPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + String database = priv.getDatabase().getName(); + + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.DATABASE, database, null, null, null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + private List listPrincipalAllDBGrant(String principalName, + PrincipalType principalType) { + boolean success = false; + List mSecurityDBList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllDBGrant"); + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MDBPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityDBList = (List) query.execute( + principalName, principalType.toString()); + } else { + Query query = pm.newQuery(MDBPrivilege.class); + mSecurityDBList = (List) query.execute(); + } + LOG.debug("Done executing query for listPrincipalAllDBGrant"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @SuppressWarnings("unchecked") + public List listAllTableGrants(String dbName, + String tableName) { + boolean success = false; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + List mSecurityTabList = null; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + try { + openTransaction(); + LOG.debug("Executing listAllTableGrants"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + Query query = 
pm.newQuery(MTablePrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabList = (List) query.executeWithArray( + tableName, dbName); + LOG.debug("Done executing query for listAllTableGrants"); + pm.retrieveAll(mSecurityTabList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllTableGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabList; + } + + @SuppressWarnings("unchecked") + public List listTableAllPartitionGrants(String dbName, + String tableName) { + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listTableAllPartitionGrants"); + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + Query query = pm.newQuery(MPartitionPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listTableAllPartitionGrants"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listTableAllPartitionGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listTableAllColumnGrants(String dbName, + String tableName) { + boolean success = false; + List mTblColPrivilegeList = null; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + try { + openTransaction(); + LOG.debug("Executing listTableAllColumnGrants"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mTblColPrivilegeList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listTableAllColumnGrants"); + pm.retrieveAll(mTblColPrivilegeList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listTableAllColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mTblColPrivilegeList; + } + + @SuppressWarnings("unchecked") + public List listTableAllPartitionColumnGrants( + String dbName, String tableName) { + boolean success = false; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listTableAllPartitionColumnGrants"); + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + Query query = pm + .newQuery(MPartitionColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listTableAllPartitionColumnGrants"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + public List listPartitionAllColumnGrants( + String dbName, String tableName, List partNames) { + boolean success = false; + 
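The listXxxGrants methods in this hunk all follow the same JDO query pattern: a candidate model class, a JDOQL filter with declared String parameters, and executeWithArray. A rough sketch of that pattern, assuming the standard javax.jdo API and the MTablePrivilege model class referenced above (illustration only, not the patch's code):

import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import org.apache.hadoop.hive.metastore.model.MTablePrivilege;

final class GrantQuerySketch {
  // Builds the same kind of JDOQL filter the list methods above use and
  // binds the lower-cased, trimmed names as positional parameters.
  @SuppressWarnings("unchecked")
  static List<MTablePrivilege> tableGrants(PersistenceManager pm, String dbName, String tableName) {
    Query query = pm.newQuery(MTablePrivilege.class,
        "table.tableName == t1 && table.database.name == t2");
    query.declareParameters("java.lang.String t1, java.lang.String t2");
    return (List<MTablePrivilege>) query.executeWithArray(
        tableName.toLowerCase().trim(), dbName.toLowerCase().trim());
  }
}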
tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listPartitionAllColumnGrants"); + mSecurityColList = queryByPartitionNames(dbName, tableName, + partNames, MPartitionColumnPrivilege.class, + "partition.table.tableName", + "partition.table.database.name", "partition.partitionName"); + LOG.debug("Done executing query for listPartitionAllColumnGrants"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPartitionAllColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + public void dropPartitionAllColumnGrantsNoTxn(String dbName, + String tableName, List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames( + dbName, tableName, partNames, MPartitionColumnPrivilege.class, + "partition.table.tableName", "partition.table.database.name", + "partition.partitionName"); + queryWithParams.getFirst().deletePersistentAll( + queryWithParams.getSecond()); + } + + @SuppressWarnings("unchecked") + private List listDatabaseGrants(String dbName) { + dbName = dbName.toLowerCase().trim(); + + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listDatabaseGrants"); + Query query = pm + .newQuery(MDBPrivilege.class, "database.name == t1"); + query.declareParameters("java.lang.String t1"); + List mSecurityDBList = (List) query + .executeWithArray(dbName); + LOG.debug("Done executing query for listDatabaseGrants"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listDatabaseGrants"); + return mSecurityDBList; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @SuppressWarnings("unchecked") + private List listPartitionGrants(String dbName, + String tableName, List partNames) { + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listPartitionGrants"); + mSecurityTabPartList = queryByPartitionNames(dbName, tableName, + partNames, MPartitionPrivilege.class, + "partition.table.tableName", + "partition.table.database.name", "partition.partitionName"); + LOG.debug("Done executing query for listPartitionGrants"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPartitionGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + private void dropPartitionGrantsNoTxn(String dbName, String tableName, + List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames( + dbName, tableName, partNames, MPartitionPrivilege.class, + "partition.table.tableName", "partition.table.database.name", + "partition.partitionName"); + queryWithParams.getFirst().deletePersistentAll( + queryWithParams.getSecond()); + } + + @SuppressWarnings("unchecked") + private List queryByPartitionNames(String dbName, String tableName, + List partNames, Class clazz, String tbCol, String dbCol, + String partCol) { + ObjectPair queryAndParams = makeQueryByPartitionNames( + dbName, tableName, partNames, clazz, tbCol, dbCol, partCol); + return (List) queryAndParams.getFirst().executeWithArray( + queryAndParams.getSecond()); + } + + private ObjectPair makeQueryByPartitionNames( + String dbName, String 
tableName, List partNames, + Class clazz, String tbCol, String dbCol, String partCol) { + String queryStr = tbCol + " == t1 && " + dbCol + " == t2"; + String paramStr = "java.lang.String t1, java.lang.String t2"; + Object[] params = new Object[2 + partNames.size()]; + params[0] = tableName; + params[1] = dbName; + int index = 0; + for (String partName : partNames) { + params[index + 2] = partName; + queryStr += ((index == 0) ? " && (" : " || ") + partCol + " == p" + + index; + paramStr += ", java.lang.String p" + index; + ++index; + } + queryStr += ")"; + Query query = pm.newQuery(clazz, queryStr); + query.declareParameters(paramStr); + return new ObjectPair(query, params); + } + + @Override + @SuppressWarnings("unchecked") + public List listAllTableGrants(String principalName, + PrincipalType principalType, String dbName, String tableName) { + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listAllTableGrants"); + Query query = pm + .newQuery( + MTablePrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 && table.database.name == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, principalType.toString(), + tableName, dbName); + LOG.debug("Done executing query for listAllTableGrants"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllTableGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + @Override + public List listPrincipalPartitionGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partName) { + boolean success = false; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalPartitionGrant"); + Query query = pm + .newQuery( + MPartitionPrivilege.class, + "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.partitionName == t5"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " + + "java.lang.String t5"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, principalType.toString(), + tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityPrincipalPartitionGrant"); + + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + @Override + public List listPrincipalTableColumnGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String columnName) { + boolean success = false; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + columnName = columnName.toLowerCase().trim(); + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalTableColumnGrants"); + String queryStr = 
"principalName == t1 && principalType == t2 && " + + "table.tableName == t3 && table.database.name == t4 && columnName == t5 "; + Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5"); + mSecurityColList = (List) query + .executeWithArray(principalName, principalType.toString(), + tableName, dbName, columnName); + LOG.debug("Done executing query for listPrincipalTableColumnGrants"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @Override + @SuppressWarnings("unchecked") + public List listPrincipalPartitionColumnGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partitionName, String columnName) { + boolean success = false; + tableName = tableName.toLowerCase().trim(); + dbName = dbName.toLowerCase().trim(); + columnName = columnName.toLowerCase().trim(); + + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalPartitionColumnGrants"); + Query query = pm + .newQuery( + MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.partitionName == t5 && columnName == t6"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5, java.lang.String t6"); + + mSecurityColList = (List) query + .executeWithArray(principalName, principalType.toString(), + tableName, dbName, partitionName, columnName); + LOG.debug("Done executing query for listPrincipalPartitionColumnGrants"); + pm.retrieveAll(mSecurityColList); + + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @Override + public List listPrincipalPartitionColumnGrantsAll( + String principalName, PrincipalType principalType) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalPartitionColumnGrantsAll"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, + principalType.toString()); + } else { + Query query = pm.newQuery(MPartitionColumnPrivilege.class); + mSecurityTabPartList = (List) query + .execute(); + } + LOG.debug("Done executing query for listPrincipalPartitionColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartCols(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public List listPartitionColumnGrantsAll( + String dbName, String tableName, String partitionName, + String columnName) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPartitionColumnGrantsAll"); + Query 
query = pm + .newQuery( + MPartitionColumnPrivilege.class, + "partition.table.tableName == t3 && partition.table.database.name == t4 && " + + "partition.partitionName == t5 && columnName == t6"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, java.lang.String t6"); + List mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName, partitionName, + columnName); + LOG.debug("Done executing query for listPartitionColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartCols(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPartitionColumnGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List convertPartCols( + List privs) { + List result = new ArrayList(); + for (MPartitionColumnPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + + MPartition mpartition = priv.getPartition(); + MTable mtable = mpartition.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, + mdatabase.getName(), mtable.getTableName(), + mpartition.getValues(), priv.getColumnName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + private List listPrincipalAllTableGrants( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllTableGrants"); + Query query = pm.newQuery(MTablePrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query.execute( + principalName, principalType.toString()); + LOG.debug("Done executing query for listPrincipalAllTableGrants"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @Override + public List listPrincipalTableGrantsAll( + String principalName, PrincipalType principalType) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllTableGrants"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MTablePrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query.execute( + principalName, principalType.toString()); + } else { + Query query = pm.newQuery(MTablePrivilege.class); + mSecurityTabPartList = (List) query.execute(); + } + LOG.debug("Done executing query for listPrincipalAllTableGrants"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTable(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public 
List listTableGrantsAll(String dbName, + String tableName) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listTableGrantsAll"); + Query query = pm.newQuery(MTablePrivilege.class, + "table.tableName == t1 && table.database.name == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + List mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listTableGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTable(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List convertTable(List privs) { + List result = new ArrayList(); + for (MTablePrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + + String table = priv.getTable().getTableName(); + String database = priv.getTable().getDatabase().getName(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.TABLE, + database, table, null, null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + private List listPrincipalAllPartitionGrants( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllPartitionGrants"); + Query query = pm.newQuery(MPartitionPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query.execute( + principalName, principalType.toString()); + LOG.debug("Done executing query for listPrincipalAllPartitionGrants"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllPartitionGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @Override + public List listPrincipalPartitionGrantsAll( + String principalName, PrincipalType principalType) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalPartitionGrantsAll"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MPartitionPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .execute(principalName, principalType.toString()); + } else { + Query query = pm.newQuery(MPartitionPrivilege.class); + mSecurityTabPartList = (List) query + .execute(); + } + LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartition(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public List listPartitionGrantsAll(String dbName, + String tableName, 
String partitionName) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalPartitionGrantsAll"); + Query query = pm.newQuery(MPartitionPrivilege.class, + "partition.table.tableName == t3 && partition.table.database.name == t4 && " + + "partition.partitionName == t5"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5"); + List mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName, partitionName); + LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartition(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List convertPartition( + List privs) { + List result = new ArrayList(); + for (MPartitionPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + + MPartition mpartition = priv.getPartition(); + MTable mtable = mpartition.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.PARTITION, mdatabase.getName(), + mtable.getTableName(), mpartition.getValues(), null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + private List listPrincipalAllTableColumnGrants( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityColumnList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllTableColumnGrants"); + Query query = pm.newQuery(MTableColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColumnList = (List) query.execute( + principalName, principalType.toString()); + LOG.debug("Done executing query for listPrincipalAllTableColumnGrants"); + pm.retrieveAll(mSecurityColumnList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColumnList; + } + + @Override + public List listPrincipalTableColumnGrantsAll( + String principalName, PrincipalType principalType) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalTableColumnGrantsAll"); + + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MTableColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .execute(principalName, principalType.toString()); + } else { + Query query = pm.newQuery(MTableColumnPrivilege.class); + mSecurityTabPartList = (List) query + .execute(); + } + LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTableCols(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for 
listPrincipalTableColumnGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public List listTableColumnGrantsAll(String dbName, + String tableName, String columnName) { + boolean success = false; + try { + openTransaction(); + LOG.debug("Executing listPrincipalTableColumnGrantsAll"); + Query query = pm + .newQuery(MTableColumnPrivilege.class, + "table.tableName == t3 && table.database.name == t4 && columnName == t5"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5"); + List mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName, columnName); + LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTableCols(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); + return result; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private List convertTableCols( + List privs) { + List result = new ArrayList(); + for (MTableColumnPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + PrincipalType ptype = PrincipalType + .valueOf(priv.getPrincipalType()); + + MTable mtable = priv.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, + mdatabase.getName(), mtable.getTableName(), null, + priv.getColumnName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo( + priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv + .getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor)); + } + return result; + } + + @SuppressWarnings("unchecked") + private List listPrincipalAllPartitionColumnGrants( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityColumnList = null; + try { + openTransaction(); + LOG.debug("Executing listPrincipalAllTableColumnGrants"); + Query query = pm.newQuery(MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColumnList = (List) query + .execute(principalName, principalType.toString()); + LOG.debug("Done executing query for listPrincipalAllTableColumnGrants"); + pm.retrieveAll(mSecurityColumnList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColumnList; + } + + @Override + public boolean isPartitionMarkedForEvent(String dbName, String tblName, + Map partName, PartitionEventType evtType) + throws UnknownTableException, MetaException, + InvalidPartitionException, UnknownPartitionException { + + Collection partEvents; + boolean success = false; + LOG.debug("Begin Executing isPartitionMarkedForEvent"); + try { + openTransaction(); + Query query = pm + .newQuery(MPartitionEvent.class, + "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4"); + Table tbl = getTable(dbName, tblName); // Make sure dbName and + // tblName are valid. 
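+ // A null result means dbName/tblName did not resolve to a table, so abort before running the event query.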
+ if (null == tbl) { + throw new UnknownTableException("Table: " + tblName + + " is not found."); + } + partEvents = (Collection) query.executeWithArray( + dbName, tblName, getPartitionStr(tbl, partName), + evtType.getValue()); + pm.retrieveAll(partEvents); + success = commitTransaction(); + LOG.debug("Done executing isPartitionMarkedForEvent"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return (partEvents != null && !partEvents.isEmpty()) ? true : false; + + } + + @Override + public Table markPartitionForEvent(String dbName, String tblName, + Map partName, PartitionEventType evtType) + throws MetaException, UnknownTableException, + InvalidPartitionException, UnknownPartitionException { + + LOG.debug("Begin executing markPartitionForEvent"); + boolean success = false; + Table tbl = null; + try { + openTransaction(); + tbl = getTable(dbName, tblName); // Make sure dbName and tblName are + // valid. + if (null == tbl) { + throw new UnknownTableException("Table: " + tblName + + " is not found."); + } + pm.makePersistent(new MPartitionEvent(dbName, tblName, + getPartitionStr(tbl, partName), evtType.getValue())); + success = commitTransaction(); + LOG.debug("Done executing markPartitionForEvent"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return tbl; + } + + private String getPartitionStr(Table tbl, Map partName) + throws InvalidPartitionException { + if (tbl.getPartitionKeysSize() != partName.size()) { + throw new InvalidPartitionException( + "Number of partition columns in table: " + + tbl.getPartitionKeysSize() + + " doesn't match with number of supplied partition values: " + + partName.size()); + } + final List storedVals = new ArrayList( + tbl.getPartitionKeysSize()); + for (FieldSchema partKey : tbl.getPartitionKeys()) { + String partVal = partName.get(partKey.getName()); + if (null == partVal) { + throw new InvalidPartitionException( + "No value found for partition column: " + + partKey.getName()); + } + storedVals.add(partVal); + } + return join(storedVals, ','); + } + + /** + * The following API + * + * - executeJDOQLSelect + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. + * + */ + public Collection executeJDOQLSelect(String query) { + boolean committed = false; + Collection result = null; + + try { + openTransaction(); + Query q = pm.newQuery(query); + result = (Collection) q.execute(); + committed = commitTransaction(); + if (committed) { + return result; + } else { + return null; + } + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + /** + * The following API + * + * - executeJDOQLUpdate + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. + * + */ + public long executeJDOQLUpdate(String query) { + boolean committed = false; + long numUpdated = 0; + + try { + openTransaction(); + Query q = pm.newQuery(query); + numUpdated = (Long) q.execute(); + committed = commitTransaction(); + if (committed) { + return numUpdated; + } else { + return -1; + } + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + /** + * The following API + * + * - listFSRoots + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
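+ * Collects the location URI of every MDatabase; returns null if the transaction fails to commit.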
+ * + */ + public Set listFSRoots() { + boolean committed = false; + Set fsRoots = new HashSet(); + + try { + openTransaction(); + Query query = pm.newQuery(MDatabase.class); + List mDBs = (List) query.execute(); + pm.retrieveAll(mDBs); + + for (MDatabase mDB : mDBs) { + fsRoots.add(mDB.getLocationUri()); + } + committed = commitTransaction(); + if (committed) { + return fsRoots; + } else { + return null; + } + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + private boolean shouldUpdateURI(URI onDiskUri, URI inputUri) { + String onDiskHost = onDiskUri.getHost(); + String inputHost = inputUri.getHost(); + + int onDiskPort = onDiskUri.getPort(); + int inputPort = inputUri.getPort(); + + String onDiskScheme = onDiskUri.getScheme(); + String inputScheme = inputUri.getScheme(); + + // compare ports + if (inputPort != -1) { + if (inputPort != onDiskPort) { + return false; + } + } + // compare schemes + if (inputScheme != null) { + if (onDiskScheme == null) { + return false; + } + if (!inputScheme.equalsIgnoreCase(onDiskScheme)) { + return false; + } + } + // compare hosts + if (onDiskHost != null) { + if (!inputHost.equalsIgnoreCase(onDiskHost)) { + return false; + } + } else { + return false; + } + return true; + } + + public class UpdateMDatabaseURIRetVal { + private List badRecords; + private Map updateLocations; + + UpdateMDatabaseURIRetVal(List badRecords, + Map updateLocations) { + this.badRecords = badRecords; + this.updateLocations = updateLocations; + } + + public List getBadRecords() { + return badRecords; + } + + public void setBadRecords(List badRecords) { + this.badRecords = badRecords; + } + + public Map getUpdateLocations() { + return updateLocations; + } + + public void setUpdateLocations(Map updateLocations) { + this.updateLocations = updateLocations; + } + } + + /** + * The following APIs + * + * - updateMDatabaseURI + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
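+ * Rewrites database location URIs matching oldLoc to use newLoc; with dryRun set only the proposed updates are collected, and unparseable locations are returned as bad records.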
+ * + */ + public UpdateMDatabaseURIRetVal updateMDatabaseURI(URI oldLoc, URI newLoc, + boolean dryRun) { + boolean committed = false; + Map updateLocations = new HashMap(); + List badRecords = new ArrayList(); + UpdateMDatabaseURIRetVal retVal = null; + + try { + openTransaction(); + Query query = pm.newQuery(MDatabase.class); + List mDBs = (List) query.execute(); + pm.retrieveAll(mDBs); + + for (MDatabase mDB : mDBs) { + URI locationURI = null; + String location = mDB.getLocationUri(); + try { + locationURI = new URI(location); + } catch (URISyntaxException e) { + badRecords.add(location); + } catch (NullPointerException e) { + badRecords.add(location); + } + if (locationURI == null) { + badRecords.add(location); + } else { + if (shouldUpdateURI(locationURI, oldLoc)) { + String dbLoc = mDB.getLocationUri().replaceAll( + oldLoc.toString(), newLoc.toString()); + updateLocations.put(locationURI.toString(), dbLoc); + if (!dryRun) { + mDB.setLocationUri(dbLoc); + } + } + } + } + committed = commitTransaction(); + if (committed) { + retVal = new UpdateMDatabaseURIRetVal(badRecords, + updateLocations); + } + return retVal; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + public class UpdateMStorageDescriptorTblPropURIRetVal { + private List badRecords; + private Map updateLocations; + + UpdateMStorageDescriptorTblPropURIRetVal(List badRecords, + Map updateLocations) { + this.badRecords = badRecords; + this.updateLocations = updateLocations; + } + + public List getBadRecords() { + return badRecords; + } + + public void setBadRecords(List badRecords) { + this.badRecords = badRecords; + } + + public Map getUpdateLocations() { + return updateLocations; + } + + public void setUpdateLocations(Map updateLocations) { + this.updateLocations = updateLocations; + } + } + + /** + * The following APIs + * + * - updateMStorageDescriptorTblPropURI + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
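+ * Like updateMDatabaseURI, but rewrites the storage-descriptor parameter named by tblPropKey; isDryRun skips the persistent update.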
+ * + */ + public UpdateMStorageDescriptorTblPropURIRetVal updateMStorageDescriptorTblPropURI( + URI oldLoc, URI newLoc, String tblPropKey, boolean isDryRun) { + boolean committed = false; + Map updateLocations = new HashMap(); + List badRecords = new ArrayList(); + UpdateMStorageDescriptorTblPropURIRetVal retVal = null; + + try { + openTransaction(); + Query query = pm.newQuery(MStorageDescriptor.class); + List mSDSs = (List) query + .execute(); + pm.retrieveAll(mSDSs); + + for (MStorageDescriptor mSDS : mSDSs) { + URI tablePropLocationURI = null; + if (mSDS.getParameters().containsKey(tblPropKey)) { + String tablePropLocation = mSDS.getParameters().get( + tblPropKey); + try { + tablePropLocationURI = new URI(tablePropLocation); + } catch (URISyntaxException e) { + badRecords.add(tablePropLocation); + } catch (NullPointerException e) { + badRecords.add(tablePropLocation); + } + // if tablePropKey that was passed in lead to a valid URI + // resolution, update it if + // parts of it match the old-NN-loc, else add to badRecords + if (tablePropLocationURI == null) { + badRecords.add(tablePropLocation); + } else { + if (shouldUpdateURI(tablePropLocationURI, oldLoc)) { + String tblPropLoc = mSDS + .getParameters() + .get(tblPropKey) + .replaceAll(oldLoc.toString(), + newLoc.toString()); + updateLocations + .put(tablePropLocationURI.toString(), + tblPropLoc); + if (!isDryRun) { + mSDS.getParameters() + .put(tblPropKey, tblPropLoc); + } + } + } + } + } + committed = commitTransaction(); + if (committed) { + retVal = new UpdateMStorageDescriptorTblPropURIRetVal( + badRecords, updateLocations); + } + return retVal; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + public class UpdateMStorageDescriptorTblURIRetVal { + private List badRecords; + private Map updateLocations; + + UpdateMStorageDescriptorTblURIRetVal(List badRecords, + Map updateLocations) { + this.badRecords = badRecords; + this.updateLocations = updateLocations; + } + + public List getBadRecords() { + return badRecords; + } + + public void setBadRecords(List badRecords) { + this.badRecords = badRecords; + } + + public Map getUpdateLocations() { + return updateLocations; + } + + public void setUpdateLocations(Map updateLocations) { + this.updateLocations = updateLocations; + } + } + + /** + * The following APIs + * + * - updateMStorageDescriptorTblURI + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
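+ * Rewrites MStorageDescriptor locations that match oldLoc to point at newLoc; isDryRun collects the changes without applying them.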
+ * + */ + public UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI( + URI oldLoc, URI newLoc, boolean isDryRun) { + boolean committed = false; + Map updateLocations = new HashMap(); + List badRecords = new ArrayList(); + UpdateMStorageDescriptorTblURIRetVal retVal = null; + + try { + openTransaction(); + Query query = pm.newQuery(MStorageDescriptor.class); + List mSDSs = (List) query + .execute(); + pm.retrieveAll(mSDSs); + + for (MStorageDescriptor mSDS : mSDSs) { + URI locationURI = null; + String location = mSDS.getLocation(); + try { + locationURI = new URI(location); + } catch (URISyntaxException e) { + badRecords.add(location); + } catch (NullPointerException e) { + badRecords.add(location); + } + if (locationURI == null) { + badRecords.add(location); + } else { + if (shouldUpdateURI(locationURI, oldLoc)) { + String tblLoc = mSDS.getLocation().replaceAll( + oldLoc.toString(), newLoc.toString()); + updateLocations.put(locationURI.toString(), tblLoc); + if (!isDryRun) { + mSDS.setLocation(tblLoc); + } + } + } + } + committed = commitTransaction(); + if (committed) { + retVal = new UpdateMStorageDescriptorTblURIRetVal(badRecords, + updateLocations); + } + return retVal; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + public class UpdateSerdeURIRetVal { + private List badRecords; + private Map updateLocations; + + UpdateSerdeURIRetVal(List badRecords, + Map updateLocations) { + this.badRecords = badRecords; + this.updateLocations = updateLocations; + } + + public List getBadRecords() { + return badRecords; + } + + public void setBadRecords(List badRecords) { + this.badRecords = badRecords; + } + + public Map getUpdateLocations() { + return updateLocations; + } + + public void setUpdateLocations(Map updateLocations) { + this.updateLocations = updateLocations; + } + } + + /** + * The following APIs + * + * - updateSerdeURI + * + * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift. 
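+ * Rewrites the serde parameter named by serdeProp wherever its value matches oldLoc; bad or unparseable values are reported in badRecords.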
+ * + */ + public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, + String serdeProp, boolean isDryRun) { + boolean committed = false; + Map updateLocations = new HashMap(); + List badRecords = new ArrayList(); + UpdateSerdeURIRetVal retVal = null; + + try { + openTransaction(); + Query query = pm.newQuery(MSerDeInfo.class); + List mSerdes = (List) query.execute(); + pm.retrieveAll(mSerdes); + + for (MSerDeInfo mSerde : mSerdes) { + if (mSerde.getParameters().containsKey(serdeProp)) { + String schemaLoc = mSerde.getParameters().get(serdeProp); + URI schemaLocURI = null; + try { + schemaLocURI = new URI(schemaLoc); + } catch (URISyntaxException e) { + badRecords.add(schemaLoc); + } catch (NullPointerException e) { + badRecords.add(schemaLoc); + } + if (schemaLocURI == null) { + badRecords.add(schemaLoc); + } else { + if (shouldUpdateURI(schemaLocURI, oldLoc)) { + String newSchemaLoc = schemaLoc.replaceAll( + oldLoc.toString(), newLoc.toString()); + updateLocations.put(schemaLocURI.toString(), + newSchemaLoc); + if (!isDryRun) { + mSerde.getParameters().put(serdeProp, + newSchemaLoc); + } + } + } + } + } + committed = commitTransaction(); + if (committed) { + retVal = new UpdateSerdeURIRetVal(badRecords, updateLocations); + } + return retVal; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + private void writeMTableColumnStatistics(Table table, + MTableColumnStatistics mStatsObj) throws NoSuchObjectException, + MetaException, InvalidObjectException, InvalidInputException { + String dbName = mStatsObj.getDbName(); + String tableName = mStatsObj.getTableName(); + String colName = mStatsObj.getColName(); + + LOG.info("Updating table level column statistics for db=" + dbName + + " tableName=" + tableName + " colName=" + colName); + validateTableCols(table, Lists.newArrayList(colName)); + + List oldStats = getMTableColumnStatistics( + table, Lists.newArrayList(colName)); + + if (!oldStats.isEmpty()) { + assert oldStats.size() == 1; + StatObjectConverter.setFieldsIntoOldStats(mStatsObj, + oldStats.get(0)); + pm.makePersistent(oldStats.get(0)); + } else { + pm.makePersistent(mStatsObj); + } + } + + private void writeMPartitionColumnStatistics(Table table, + Partition partition, MPartitionColumnStatistics mStatsObj) + throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + String dbName = mStatsObj.getDbName(); + String tableName = mStatsObj.getTableName(); + String partName = mStatsObj.getPartitionName(); + String colName = mStatsObj.getColName(); + + LOG.info("Updating partition level column statistics for db=" + dbName + + " tableName=" + tableName + " partName=" + partName + + " colName=" + colName); + + boolean foundCol = false; + List colList = partition.getSd().getCols(); + for (FieldSchema col : colList) { + if (col.getName().equals(mStatsObj.getColName().trim())) { + foundCol = true; + break; + } + } + + if (!foundCol) { + throw new NoSuchObjectException("Column " + colName + + " for which stats gathering is requested doesn't exist."); + } + + List oldStats = getMPartitionColumnStatistics( + table, Lists.newArrayList(partName), + Lists.newArrayList(colName)); + if (!oldStats.isEmpty()) { + assert oldStats.size() == 1; + StatObjectConverter.setFieldsIntoOldStats(mStatsObj, + oldStats.get(0)); + pm.makePersistent(oldStats.get(0)); + } else { + pm.makePersistent(mStatsObj); + } + } + + @Override + public boolean updateTableColumnStatistics(ColumnStatistics colStats) + throws NoSuchObjectException, MetaException, + 
InvalidObjectException, InvalidInputException { + boolean committed = false; + + openTransaction(); + try { + List statsObjs = colStats.getStatsObj(); + ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + + // DataNucleus objects get detached all over the place for no (real) + // reason. + // So let's not use them anywhere unless absolutely necessary. + Table table = ensureGetTable(statsDesc.getDbName(), + statsDesc.getTableName()); + for (ColumnStatisticsObj statsObj : statsObjs) { + // We have to get mtable again because DataNucleus. + MTableColumnStatistics mStatsObj = StatObjectConverter + .convertToMTableColumnStatistics( + ensureGetMTable(statsDesc.getDbName(), + statsDesc.getTableName()), statsDesc, + statsObj); + writeMTableColumnStatistics(table, mStatsObj); + } + committed = commitTransaction(); + return committed; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + @Override + public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, + List partVals) throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + boolean committed = false; + + try { + openTransaction(); + List statsObjs = colStats.getStatsObj(); + ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + Table table = ensureGetTable(statsDesc.getDbName(), + statsDesc.getTableName()); + Partition partition = convertToPart(getMPartition( + statsDesc.getDbName(), statsDesc.getTableName(), partVals)); + for (ColumnStatisticsObj statsObj : statsObjs) { + // We have to get partition again because DataNucleus + MPartition mPartition = getMPartition(statsDesc.getDbName(), + statsDesc.getTableName(), partVals); + if (partition == null) { + throw new NoSuchObjectException( + "Partition for which stats is gathered doesn't exist."); + } + MPartitionColumnStatistics mStatsObj = StatObjectConverter + .convertToMPartitionColumnStatistics(mPartition, + statsDesc, statsObj); + writeMPartitionColumnStatistics(table, partition, mStatsObj); + } + committed = commitTransaction(); + return committed; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + private List getMTableColumnStatistics(Table table, + List colNames) throws MetaException { + boolean committed = false; + openTransaction(); + try { + List result = null; + validateTableCols(table, colNames); + + Query query = pm.newQuery(MTableColumnStatistics.class); + String filter = "tableName == t1 && dbName == t2 && ("; + String paramStr = "java.lang.String t1, java.lang.String t2"; + Object[] params = new Object[colNames.size() + 2]; + params[0] = table.getTableName(); + params[1] = table.getDbName(); + for (int i = 0; i < colNames.size(); ++i) { + filter += ((i == 0) ? 
"" : " || ") + "colName == c" + i; + paramStr += ", java.lang.String c" + i; + params[i + 2] = colNames.get(i); + } + filter += ")"; + query.setFilter(filter); + query.declareParameters(paramStr); + result = (List) query + .executeWithArray(params); + pm.retrieveAll(result); + if (result.size() > colNames.size()) { + throw new MetaException("Unexpected " + result.size() + + " statistics for " + colNames.size() + " columns"); + } + committed = commitTransaction(); + return result; + } catch (Exception ex) { + LOG.error("Error retrieving statistics via jdo", ex); + if (ex instanceof MetaException) { + throw (MetaException) ex; + } + throw new MetaException(ex.getMessage()); + } finally { + if (!committed) { + rollbackTransaction(); + return Lists.newArrayList(); + } + } + } + + private void validateTableCols(Table table, List colNames) + throws MetaException { + List colList = table.getSd().getCols(); + for (String colName : colNames) { + boolean foundCol = false; + for (FieldSchema mCol : colList) { + if (mCol.getName().equals(colName.trim())) { + foundCol = true; + break; + } + } + if (!foundCol) { + throw new MetaException("Column " + colName + " doesn't exist."); + } + } + } + + @Override + public ColumnStatistics getTableColumnStatistics(String dbName, + String tableName, List colNames) throws MetaException, + NoSuchObjectException { + return getTableColumnStatisticsInternal(dbName, tableName, colNames, + true, true); + } + + protected ColumnStatistics getTableColumnStatisticsInternal(String dbName, + String tableName, final List colNames, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { + return new GetStatHelper(dbName.toLowerCase(), tableName.toLowerCase(), + allowSql, allowJdo) { + @Override + protected ColumnStatistics getSqlResult( + GetHelper ctx) throws MetaException { + return directSql.getTableStats(dbName, tblName, colNames); + } + + @Override + protected ColumnStatistics getJdoResult( + GetHelper ctx) throws MetaException, + NoSuchObjectException { + List mStats = getMTableColumnStatistics( + getTable(), colNames); + if (mStats.isEmpty()) + return null; + // LastAnalyzed is stored per column, but thrift object has it + // per multiple columns. + // Luckily, nobody actually uses it, so we will set to lowest + // value of all columns for now. 
+ ColumnStatisticsDesc desc = StatObjectConverter + .getTableColumnStatisticsDesc(mStats.get(0)); + List statObjs = new ArrayList( + mStats.size()); + for (MTableColumnStatistics mStat : mStats) { + if (desc.getLastAnalyzed() > mStat.getLastAnalyzed()) { + desc.setLastAnalyzed(mStat.getLastAnalyzed()); + } + statObjs.add(StatObjectConverter + .getTableColumnStatisticsObj(mStat)); + } + return new ColumnStatistics(desc, statObjs); + } + }.run(true); + } + + @Override + public List getPartitionColumnStatistics(String dbName, + String tableName, List partNames, List colNames) + throws MetaException, NoSuchObjectException { + return getPartitionColumnStatisticsInternal(dbName, tableName, + partNames, colNames, true, true); + } + + protected List getPartitionColumnStatisticsInternal( + String dbName, String tableName, final List partNames, + final List colNames, boolean allowSql, boolean allowJdo) + throws MetaException, NoSuchObjectException { + return new GetListHelper(dbName, tableName, allowSql, + allowJdo) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + return directSql.getPartitionStats(dbName, tblName, partNames, + colNames); + } + + @Override + protected List getJdoResult( + GetHelper> ctx) + throws MetaException, NoSuchObjectException { + List mStats = getMPartitionColumnStatistics( + getTable(), partNames, colNames); + List result = new ArrayList( + Math.min(mStats.size(), partNames.size())); + String lastPartName = null; + List curList = null; + ColumnStatisticsDesc csd = null; + for (int i = 0; i <= mStats.size(); ++i) { + boolean isLast = i == mStats.size(); + MPartitionColumnStatistics mStatsObj = isLast ? null + : mStats.get(i); + String partName = isLast ? null : (String) mStatsObj + .getPartitionName(); + if (isLast || !partName.equals(lastPartName)) { + if (i != 0) { + result.add(new ColumnStatistics(csd, curList)); + } + if (isLast) { + continue; + } + csd = StatObjectConverter + .getPartitionColumnStatisticsDesc(mStatsObj); + curList = new ArrayList( + colNames.size()); + } + curList.add(StatObjectConverter + .getPartitionColumnStatisticsObj(mStatsObj)); + lastPartName = partName; + } + return result; + } + }.run(true); + } + + private List getMPartitionColumnStatistics( + Table table, List partNames, List colNames) + throws NoSuchObjectException, MetaException { + boolean committed = false; + MPartitionColumnStatistics mStatsObj = null; + try { + openTransaction(); + // We are not going to verify SD for each partition. Just verify for + // the table. + validateTableCols(table, colNames); + boolean foundCol = false; + Query query = pm.newQuery(MPartitionColumnStatistics.class); + String paramStr = "java.lang.String t1, java.lang.String t2"; + String filter = "tableName == t1 && dbName == t2 && ("; + Object[] params = new Object[colNames.size() + partNames.size() + 2]; + int i = 0; + params[i++] = table.getTableName(); + params[i++] = table.getDbName(); + int firstI = i; + for (String s : partNames) { + filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + + i; + paramStr += ", java.lang.String p" + i; + params[i++] = s; + } + filter += ") && ("; + firstI = i; + for (String s : colNames) { + filter += ((i == firstI) ? 
"" : " || ") + "colName == c" + i; + paramStr += ", java.lang.String c" + i; + params[i++] = s; + } + filter += ")"; + query.setFilter(filter); + query.declareParameters(paramStr); + query.setOrdering("partitionName ascending"); + @SuppressWarnings("unchecked") + List result = (List) query + .executeWithArray(params); + pm.retrieveAll(result); + committed = commitTransaction(); + return result; + } catch (Exception ex) { + LOG.error("Error retrieving statistics via jdo", ex); + if (ex instanceof MetaException) { + throw (MetaException) ex; + } + throw new MetaException(ex.getMessage()); + } finally { + if (!committed) { + rollbackTransaction(); + return Lists.newArrayList(); + } + } + } + + private void dropPartitionColumnStatisticsNoTxn(String dbName, + String tableName, List partNames) throws MetaException { + ObjectPair queryWithParams = makeQueryByPartitionNames( + dbName, tableName, partNames, MPartitionColumnStatistics.class, + "tableName", "dbName", "partition.partitionName"); + queryWithParams.getFirst().deletePersistentAll( + queryWithParams.getSecond()); + } + + @Override + public boolean deletePartitionColumnStatistics(String dbName, + String tableName, String partName, List partVals, + String colName) throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + boolean ret = false; + + if (dbName == null) { + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + } + + if (tableName == null) { + throw new InvalidInputException("Table name is null."); + } + + try { + openTransaction(); + MTable mTable = getMTable(dbName, tableName); + MPartitionColumnStatistics mStatsObj; + List mStatsObjColl; + + if (mTable == null) { + throw new NoSuchObjectException( + "Table " + + tableName + + " for which stats deletion is requested doesn't exist"); + } + + MPartition mPartition = getMPartition(dbName, tableName, partVals); + + if (mPartition == null) { + throw new NoSuchObjectException( + "Partition " + + partName + + " for which stats deletion is requested doesn't exist"); + } + + Query query = pm.newQuery(MPartitionColumnStatistics.class); + String filter; + String parameters; + + if (colName != null) { + filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && " + + "colName == t4"; + parameters = "java.lang.String t1, java.lang.String t2, " + + "java.lang.String t3, java.lang.String t4"; + } else { + filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + } + + query.setFilter(filter); + query.declareParameters(parameters); + + if (colName != null) { + query.setUnique(true); + mStatsObj = (MPartitionColumnStatistics) query + .executeWithArray(partName.trim(), dbName.trim(), + tableName.trim(), colName.trim()); + pm.retrieve(mStatsObj); + + if (mStatsObj != null) { + pm.deletePersistent(mStatsObj); + } else { + throw new NoSuchObjectException( + "Column stats doesn't exist for db=" + dbName + + " table=" + tableName + " partition=" + + partName + " col=" + colName); + } + } else { + mStatsObjColl = (List) query + .execute(partName.trim(), dbName.trim(), + tableName.trim()); + pm.retrieveAll(mStatsObjColl); + + if (mStatsObjColl != null) { + pm.deletePersistentAll(mStatsObjColl); + } else { + throw new NoSuchObjectException( + "Column stats doesn't exist for db=" + dbName + + " table=" + tableName + " partition" + + partName); + } + } + ret = commitTransaction(); + } catch (NoSuchObjectException e) { + rollbackTransaction(); + throw e; 
+ } finally { + if (!ret) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + boolean ret = false; + + if (dbName == null) { + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + } + + if (tableName == null) { + throw new InvalidInputException("Table name is null."); + } + + try { + openTransaction(); + MTable mTable = getMTable(dbName, tableName); + MTableColumnStatistics mStatsObj; + List mStatsObjColl; + + if (mTable == null) { + throw new NoSuchObjectException( + "Table " + + tableName + + " for which stats deletion is requested doesn't exist"); + } + + Query query = pm.newQuery(MTableColumnStatistics.class); + String filter; + String parameters; + + if (colName != null) { + filter = "table.tableName == t1 && dbName == t2 && colName == t3"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + } else { + filter = "table.tableName == t1 && dbName == t2"; + parameters = "java.lang.String t1, java.lang.String t2"; + } + + query.setFilter(filter); + query.declareParameters(parameters); + + if (colName != null) { + query.setUnique(true); + mStatsObj = (MTableColumnStatistics) query.execute( + tableName.trim(), dbName.trim(), colName.trim()); + pm.retrieve(mStatsObj); + + if (mStatsObj != null) { + pm.deletePersistent(mStatsObj); + } else { + throw new NoSuchObjectException( + "Column stats doesn't exist for db=" + dbName + + " table=" + tableName + " col=" + colName); + } + } else { + mStatsObjColl = (List) query.execute( + tableName.trim(), dbName.trim()); + pm.retrieveAll(mStatsObjColl); + + if (mStatsObjColl != null) { + pm.deletePersistentAll(mStatsObjColl); + } else { + throw new NoSuchObjectException( + "Column stats doesn't exist for db=" + dbName + + " table=" + tableName); + } + } + ret = commitTransaction(); + } catch (NoSuchObjectException e) { + rollbackTransaction(); + throw e; + } finally { + if (!ret) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public long cleanupEvents() { + boolean commited = false; + long delCnt; + LOG.debug("Begin executing cleanupEvents"); + Long expiryTime = HiveConf.getLongVar(getConf(), + ConfVars.METASTORE_EVENT_EXPIRY_DURATION) * 1000L; + Long curTime = System.currentTimeMillis(); + try { + openTransaction(); + Query query = pm.newQuery(MPartitionEvent.class, + "curTime - eventTime > expiryTime"); + query.declareParameters("java.lang.Long curTime, java.lang.Long expiryTime"); + delCnt = query.deletePersistentAll(curTime, expiryTime); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + LOG.debug("Done executing cleanupEvents"); + } + return delCnt; + } + + private MDelegationToken getTokenFrom(String tokenId) { + Query query = pm.newQuery(MDelegationToken.class, + "tokenIdentifier == tokenId"); + query.declareParameters("java.lang.String tokenId"); + query.setUnique(true); + return (MDelegationToken) query.execute(tokenId); + } + + @Override + public boolean addToken(String tokenId, String delegationToken) { + + LOG.debug("Begin executing addToken"); + boolean committed = false; + MDelegationToken token; + try { + openTransaction(); + token = getTokenFrom(tokenId); + if (token == null) { + // add Token, only if it already doesn't exist + pm.makePersistent(new MDelegationToken(tokenId, delegationToken)); + } + committed = commitTransaction(); + } 
finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing addToken with status : " + committed); + return committed && (token == null); + } + + @Override + public boolean removeToken(String tokenId) { + + LOG.debug("Begin executing removeToken"); + boolean committed = false; + MDelegationToken token; + try { + openTransaction(); + token = getTokenFrom(tokenId); + if (null != token) { + pm.deletePersistent(token); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing removeToken with status : " + committed); + return committed && (token != null); + } + + @Override + public String getToken(String tokenId) { + + LOG.debug("Begin executing getToken"); + boolean committed = false; + MDelegationToken token; + try { + openTransaction(); + token = getTokenFrom(tokenId); + if (null != token) { + pm.retrieve(token); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing getToken with status : " + committed); + return (null == token) ? null : token.getTokenStr(); + } + + @Override + public List getAllTokenIdentifiers() { + + LOG.debug("Begin executing getAllTokenIdentifiers"); + boolean committed = false; + List tokens; + try { + openTransaction(); + Query query = pm.newQuery(MDelegationToken.class); + tokens = (List) query.execute(); + pm.retrieveAll(tokens); + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing getAllTokenIdentifers with status : " + + committed); + List tokenIdents = new ArrayList(tokens.size()); + + for (MDelegationToken token : tokens) { + tokenIdents.add(token.getTokenIdentifier()); + } + return tokenIdents; + } + + @Override + public int addMasterKey(String key) throws MetaException { + LOG.debug("Begin executing addMasterKey"); + boolean committed = false; + MMasterKey masterKey = new MMasterKey(key); + try { + openTransaction(); + pm.makePersistent(masterKey); + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing addMasterKey with status : " + committed); + if (committed) { + return ((IntIdentity) pm.getObjectId(masterKey)).getKey(); + } else { + throw new MetaException("Failed to add master key."); + } + } + + @Override + public void updateMasterKey(Integer id, String key) + throws NoSuchObjectException, MetaException { + LOG.debug("Begin executing updateMasterKey"); + boolean committed = false; + MMasterKey masterKey; + try { + openTransaction(); + Query query = pm.newQuery(MMasterKey.class, "keyId == id"); + query.declareParameters("java.lang.Integer id"); + query.setUnique(true); + masterKey = (MMasterKey) query.execute(id); + if (null != masterKey) { + masterKey.setMasterKey(key); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing updateMasterKey with status : " + committed); + if (null == masterKey) { + throw new NoSuchObjectException("No key found with keyId: " + id); + } + if (!committed) { + throw new MetaException( + "Though key is found, failed to update it. 
" + id); + } + } + + @Override + public boolean removeMasterKey(Integer id) { + LOG.debug("Begin executing removeMasterKey"); + boolean success = false; + MMasterKey masterKey; + try { + openTransaction(); + Query query = pm.newQuery(MMasterKey.class, "keyId == id"); + query.declareParameters("java.lang.Integer id"); + query.setUnique(true); + masterKey = (MMasterKey) query.execute(id); + if (null != masterKey) { + pm.deletePersistent(masterKey); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + LOG.debug("Done executing removeMasterKey with status : " + success); + return (null != masterKey) && success; + } + + @Override + public String[] getMasterKeys() { + LOG.debug("Begin executing getMasterKeys"); + boolean committed = false; + List keys; + try { + openTransaction(); + Query query = pm.newQuery(MMasterKey.class); + keys = (List) query.execute(); + pm.retrieveAll(keys); + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + LOG.debug("Done executing getMasterKeys with status : " + committed); + String[] masterKeys = new String[keys.size()]; + + for (int i = 0; i < keys.size(); i++) { + masterKeys[i] = keys.get(i).getMasterKey(); + } + return masterKeys; + } + + // compare hive version and metastore version + @Override + public void verifySchema() throws MetaException { + // If the schema version is already checked, then go ahead and use this + // metastore + if (isSchemaVerified.get()) { + return; + } + checkSchema(); + } + + private synchronized void checkSchema() throws MetaException { + // recheck if it got verified by another thread while we were waiting + if (isSchemaVerified.get()) { + return; + } + + boolean strictValidation = HiveConf.getBoolVar(getConf(), + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); + // read the schema version stored in metastore db + String schemaVer = getMetaStoreSchemaVersion(); + if (schemaVer == null) { + // metastore has no schema version information + if (strictValidation) { + throw new MetaException( + "Version information not found in metastore. "); + } else { + LOG.warn("Version information not found in metastore. 
" + + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION + .toString() + + " is not enabled so recording the schema version " + + MetaStoreSchemaInfo.getHiveSchemaVersion()); + setMetaStoreSchemaVersion( + MetaStoreSchemaInfo.getHiveSchemaVersion(), + "Set by MetaStore"); + } + } else { + // metastore schema version is different than Hive distribution + // needs + if (strictValidation) { + if (!schemaVer.equalsIgnoreCase(MetaStoreSchemaInfo + .getHiveSchemaVersion())) { + throw new MetaException("Hive Schema version " + + MetaStoreSchemaInfo.getHiveSchemaVersion() + + " does not match metastore's schema version " + + schemaVer + + " Metastore is not upgraded or corrupt"); + } else { + LOG.warn("Metastore version was " + + schemaVer + + " " + + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION + .toString() + + " is not enabled so recording the new schema version " + + MetaStoreSchemaInfo.getHiveSchemaVersion()); + setMetaStoreSchemaVersion( + MetaStoreSchemaInfo.getHiveSchemaVersion(), + "Set by MetaStore"); + } + } + } + isSchemaVerified.set(true); + return; + } + + // load the schema version stored in metastore db + @Override + public String getMetaStoreSchemaVersion() throws MetaException { + + MVersionTable mSchemaVer; + try { + mSchemaVer = getMSchemaVersion(); + } catch (NoSuchObjectException e) { + return null; + } + return mSchemaVer.getSchemaVersion(); + } + + @SuppressWarnings("unchecked") + private MVersionTable getMSchemaVersion() throws NoSuchObjectException, + MetaException { + boolean committed = false; + List mVerTables = new ArrayList(); + + try { + openTransaction(); + Query query = pm.newQuery(MVersionTable.class); + + try { + mVerTables = (List) query.execute(); + pm.retrieveAll(mVerTables); + } catch (JDODataStoreException e) { + if (e.getCause() instanceof MissingTableException) { + throw new MetaException("Version table not found. " + + "The metastore is not upgraded to " + + MetaStoreSchemaInfo.getHiveSchemaVersion()); + } else { + throw e; + } + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + if (mVerTables.isEmpty()) { + throw new NoSuchObjectException("No matching version found"); + } + if (mVerTables.size() > 1) { + throw new MetaException("Metastore contains multiple versions"); + } + return mVerTables.get(0); + } + + @Override + public void setMetaStoreSchemaVersion(String schemaVersion, String comment) + throws MetaException { + MVersionTable mSchemaVer; + boolean commited = false; + + try { + mSchemaVer = getMSchemaVersion(); + } catch (NoSuchObjectException e) { + // if the version doesn't exist, then create it + mSchemaVer = new MVersionTable(); + } + + mSchemaVer.setSchemaVersion(schemaVersion); + mSchemaVer.setVersionComment(comment); + try { + openTransaction(); + pm.makePersistent(mSchemaVer); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + } + + @Override + public boolean doesPartitionExist(String dbName, String tableName, + List partVals) throws MetaException { + boolean success = false; + try { + openTransaction(); + dbName = dbName.toLowerCase().trim(); + tableName = tableName.toLowerCase().trim(); + + // TODO: this could also be passed from upper layer; or this method + // should filter the list. 
+ MTable mtbl = getMTable(dbName, tableName); + if (mtbl == null) { + success = commitTransaction(); + return false; + } + + Query query = pm + .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where table.tableName == t1 && table.database.name == t2 && partitionName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + query.setResult("partitionName"); + String name = Warehouse.makePartName( + convertToFieldSchemas(mtbl.getPartitionKeys()), partVals); + String result = (String) query.execute(tableName, dbName, name); + success = commitTransaction(); + return result != null; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private void debugLog(String message) { + if (LOG.isDebugEnabled()) { + LOG.debug(message + getCallStack()); + } + } + + private static final int stackLimit = 5; + + private String getCallStack() { + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + int thislimit = Math.min(stackLimit, stackTrace.length); + StringBuilder sb = new StringBuilder(); + sb.append(" at:"); + for (int i = 4; i < thislimit; i++) { + sb.append("\n\t"); + sb.append(stackTrace[i].toString()); + } + return sb.toString(); + } + + private Function convertToFunction(MFunction mfunc) { + if (mfunc == null) { + return null; + } + + Function func = new Function(mfunc.getFunctionName(), mfunc + .getDatabase().getName(), mfunc.getClassName(), + mfunc.getOwnerName(), PrincipalType.valueOf(mfunc + .getOwnerType()), mfunc.getCreateTime(), + FunctionType.findByValue(mfunc.getFunctionType()), + convertToResourceUriList(mfunc.getResourceUris())); + return func; + } + + private MFunction convertToMFunction(Function func) + throws InvalidObjectException { + if (func == null) { + return null; + } + + MDatabase mdb = null; + try { + mdb = getMDatabase(func.getDbName()); + } catch (NoSuchObjectException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new InvalidObjectException("Database " + func.getDbName() + + " doesn't exist."); + } + + MFunction mfunc = new MFunction(func.getFunctionName(), mdb, + func.getClassName(), func.getOwnerName(), func.getOwnerType() + .name(), func.getCreateTime(), func.getFunctionType() + .getValue(), + convertToMResourceUriList(func.getResourceUris())); + return mfunc; + } + + private List convertToResourceUriList( + List mresourceUriList) { + List resourceUriList = null; + if (mresourceUriList != null) { + resourceUriList = new ArrayList( + mresourceUriList.size()); + for (MResourceUri mres : mresourceUriList) { + resourceUriList.add(new ResourceUri(ResourceType + .findByValue(mres.getResourceType()), mres.getUri())); + } + } + return resourceUriList; + } + + private List convertToMResourceUriList( + List resourceUriList) { + List mresourceUriList = null; + if (resourceUriList != null) { + mresourceUriList = new ArrayList( + resourceUriList.size()); + for (ResourceUri res : resourceUriList) { + mresourceUriList.add(new MResourceUri(res.getResourceType() + .getValue(), res.getUri())); + } + } + return mresourceUriList; + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, + MetaException { + boolean committed = false; + try { + openTransaction(); + MFunction mfunc = convertToMFunction(func); + pm.makePersistent(mfunc); + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + @Override + public void alterFunction(String 
dbName, String funcName, + Function newFunction) throws InvalidObjectException, MetaException { + boolean success = false; + try { + openTransaction(); + funcName = funcName.toLowerCase(); + dbName = dbName.toLowerCase(); + MFunction newf = convertToMFunction(newFunction); + if (newf == null) { + throw new InvalidObjectException("new function is invalid"); + } + + MFunction oldf = getMFunction(dbName, funcName); + if (oldf == null) { + throw new MetaException("function " + funcName + + " doesn't exist"); + } + + // For now only alter name, owner, class name, type + oldf.setFunctionName(newf.getFunctionName().toLowerCase()); + oldf.setDatabase(newf.getDatabase()); + oldf.setOwnerName(newf.getOwnerName()); + oldf.setOwnerType(newf.getOwnerType()); + oldf.setClassName(newf.getClassName()); + oldf.setFunctionType(newf.getFunctionType()); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override + public void dropFunction(String dbName, String funcName) + throws MetaException, NoSuchObjectException, + InvalidObjectException, InvalidInputException { + boolean success = false; + try { + openTransaction(); + MFunction mfunc = getMFunction(dbName, funcName); + pm.retrieve(mfunc); + if (mfunc != null) { + // TODO: When function privileges are implemented, they should + // be deleted here. + pm.deletePersistentAll(mfunc); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + private MFunction getMFunction(String db, String function) { + MFunction mfunc = null; + boolean commited = false; + try { + openTransaction(); + db = db.toLowerCase().trim(); + function = function.toLowerCase().trim(); + Query query = pm.newQuery(MFunction.class, + "functionName == function && database.name == db"); + query.declareParameters("java.lang.String function, java.lang.String db"); + query.setUnique(true); + mfunc = (MFunction) query.execute(function, db); + pm.retrieve(mfunc); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mfunc; + } + + @Override + public Function getFunction(String dbName, String funcName) + throws MetaException { + boolean commited = false; + Function func = null; + try { + openTransaction(); + func = convertToFunction(getMFunction(dbName, funcName)); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return func; + } + + @Override + public List getFunctions(String dbName, String pattern) + throws MetaException { + boolean commited = false; + List funcs = null; + try { + openTransaction(); + dbName = dbName.toLowerCase().trim(); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); + String query = "select functionName from org.apache.hadoop.hive.metastore.model.MFunction " + + "where database.name == dbName && ("; + boolean first = true; + for (String subpattern : subpatterns) { + subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); + if (!first) { + query = query + " || "; + } + query = query + " functionName.matches(\"" + subpattern + "\")"; + first = false; + } + query = query + ")"; + + Query q = pm.newQuery(query); + q.declareParameters("java.lang.String dbName"); + q.setResult("functionName"); + q.setOrdering("functionName ascending"); + Collection names = (Collection) q.execute(dbName); + funcs = new ArrayList(); + for (Iterator i = names.iterator(); 
i.hasNext();) { + funcs.add((String) i.next()); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return funcs; + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index c3e2820..cdad9f1 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -42,381 +42,446 @@ /** * This class contains conversion logic that creates Thrift stat objects from - * JDO stat objects and plain arrays from DirectSQL. - * It is hidden here so that we wouldn't have to look at it in elsewhere. + * JDO stat objects and plain arrays from DirectSQL. It is hidden here so that + * we wouldn't have to look at it in elsewhere. */ public class StatObjectConverter { - // JDO - public static MTableColumnStatistics convertToMTableColumnStatistics(MTable table, - ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj) - throws NoSuchObjectException, MetaException, InvalidObjectException { - if (statsObj == null || statsDesc == null) { - throw new InvalidObjectException("Invalid column stats object"); - } + // JDO + public static MTableColumnStatistics convertToMTableColumnStatistics( + MTable table, ColumnStatisticsDesc statsDesc, + ColumnStatisticsObj statsObj) throws NoSuchObjectException, + MetaException, InvalidObjectException { + if (statsObj == null || statsDesc == null) { + throw new InvalidObjectException("Invalid column stats object"); + } - MTableColumnStatistics mColStats = new MTableColumnStatistics(); - mColStats.setTable(table); - mColStats.setDbName(statsDesc.getDbName()); - mColStats.setTableName(statsDesc.getTableName()); - mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); - mColStats.setColName(statsObj.getColName()); - mColStats.setColType(statsObj.getColType()); + MTableColumnStatistics mColStats = new MTableColumnStatistics(); + mColStats.setTable(table); + mColStats.setDbName(statsDesc.getDbName()); + mColStats.setTableName(statsDesc.getTableName()); + mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); + mColStats.setColName(statsObj.getColName()); + mColStats.setColType(statsObj.getColType()); - if (statsObj.getStatsData().isSetBooleanStats()) { - BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats(); - mColStats.setBooleanStats(boolStats.getNumTrues(), boolStats.getNumFalses(), - boolStats.getNumNulls()); - } else if (statsObj.getStatsData().isSetLongStats()) { - LongColumnStatsData longStats = statsObj.getStatsData().getLongStats(); - mColStats.setLongStats(longStats.getNumNulls(), longStats.getNumDVs(), - longStats.isSetLowValue() ? longStats.getLowValue() : null, - longStats.isSetHighValue() ? longStats.getHighValue() : null); - } else if (statsObj.getStatsData().isSetDoubleStats()) { - DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats(); - mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats.getNumDVs(), - doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null, - doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null); - } else if (statsObj.getStatsData().isSetDecimalStats()) { - DecimalColumnStatsData decimalStats = statsObj.getStatsData().getDecimalStats(); - String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null; - String high = decimalStats.isSetHighValue() ? 
createJdoDecimalString(decimalStats.getHighValue()) : null; - mColStats.setDecimalStats(decimalStats.getNumNulls(), decimalStats.getNumDVs(), low, high); - } else if (statsObj.getStatsData().isSetStringStats()) { - StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats(); - mColStats.setStringStats(stringStats.getNumNulls(), stringStats.getNumDVs(), - stringStats.getMaxColLen(), stringStats.getAvgColLen()); - } else if (statsObj.getStatsData().isSetBinaryStats()) { - BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats(); - mColStats.setBinaryStats(binaryStats.getNumNulls(), binaryStats.getMaxColLen(), - binaryStats.getAvgColLen()); - } - return mColStats; - } + if (statsObj.getStatsData().isSetBooleanStats()) { + BooleanColumnStatsData boolStats = statsObj.getStatsData() + .getBooleanStats(); + mColStats.setBooleanStats(boolStats.getNumTrues(), + boolStats.getNumFalses(), boolStats.getNumNulls()); + } else if (statsObj.getStatsData().isSetLongStats()) { + LongColumnStatsData longStats = statsObj.getStatsData() + .getLongStats(); + mColStats.setLongStats(longStats.getNumNulls(), longStats + .getNumDVs(), + longStats.isSetLowValue() ? longStats.getLowValue() : null, + longStats.isSetHighValue() ? longStats.getHighValue() + : null); + } else if (statsObj.getStatsData().isSetDoubleStats()) { + DoubleColumnStatsData doubleStats = statsObj.getStatsData() + .getDoubleStats(); + mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats + .getNumDVs(), + doubleStats.isSetLowValue() ? doubleStats.getLowValue() + : null, + doubleStats.isSetHighValue() ? doubleStats.getHighValue() + : null); + } else if (statsObj.getStatsData().isSetDecimalStats()) { + DecimalColumnStatsData decimalStats = statsObj.getStatsData() + .getDecimalStats(); + String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats + .getLowValue()) : null; + String high = decimalStats.isSetHighValue() ? 
createJdoDecimalString(decimalStats + .getHighValue()) : null; + mColStats.setDecimalStats(decimalStats.getNumNulls(), + decimalStats.getNumDVs(), low, high); + } else if (statsObj.getStatsData().isSetStringStats()) { + StringColumnStatsData stringStats = statsObj.getStatsData() + .getStringStats(); + mColStats.setStringStats(stringStats.getNumNulls(), + stringStats.getNumDVs(), stringStats.getMaxColLen(), + stringStats.getAvgColLen()); + } else if (statsObj.getStatsData().isSetBinaryStats()) { + BinaryColumnStatsData binaryStats = statsObj.getStatsData() + .getBinaryStats(); + mColStats.setBinaryStats(binaryStats.getNumNulls(), + binaryStats.getMaxColLen(), binaryStats.getAvgColLen()); + } + return mColStats; + } - public static void setFieldsIntoOldStats( - MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) { - oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); - if (mStatsObj.getLongHighValue() != null) { - oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue()); - } - if (mStatsObj.getLongLowValue() != null) { - oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue()); - } - if (mStatsObj.getDoubleLowValue() != null) { - oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue()); - } - if (mStatsObj.getDoubleHighValue() != null) { - oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue()); - } - if (mStatsObj.getDecimalLowValue() != null) { - oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue()); - } - if (mStatsObj.getDecimalHighValue() != null) { - oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue()); - } - oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen()); - oldStatsObj.setNumDVs(mStatsObj.getNumDVs()); - oldStatsObj.setNumFalses(mStatsObj.getNumFalses()); - oldStatsObj.setNumTrues(mStatsObj.getNumTrues()); - oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); - oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed()); - } + public static void setFieldsIntoOldStats(MTableColumnStatistics mStatsObj, + MTableColumnStatistics oldStatsObj) { + if (mStatsObj.getLongHighValue() != null) { + oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue()); + } + if (mStatsObj.getLongLowValue() != null) { + oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue()); + } + if (mStatsObj.getDoubleLowValue() != null) { + oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue()); + } + if (mStatsObj.getDoubleHighValue() != null) { + oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue()); + } + if (mStatsObj.getDecimalLowValue() != null) { + oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue()); + } + if (mStatsObj.getDecimalHighValue() != null) { + oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue()); + } + if (mStatsObj.getAvgColLen() != null) + oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); + if (mStatsObj.getMaxColLen() != null) + oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen()); + if (mStatsObj.getNumDVs() != null) + oldStatsObj.setNumDVs(mStatsObj.getNumDVs()); + if (mStatsObj.getNumFalses() != null) + oldStatsObj.setNumFalses(mStatsObj.getNumFalses()); + if (mStatsObj.getNumTrues() != null) + oldStatsObj.setNumTrues(mStatsObj.getNumTrues()); + if (mStatsObj.getNumNulls() != null) + oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); + oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed()); + } - public static void setFieldsIntoOldStats( - MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) { - oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); - 
oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue()); - oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue()); - oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed()); - oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue()); - oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue()); - oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue()); - oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue()); - oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen()); - oldStatsObj.setNumDVs(mStatsObj.getNumDVs()); - oldStatsObj.setNumFalses(mStatsObj.getNumFalses()); - oldStatsObj.setNumTrues(mStatsObj.getNumTrues()); - oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); - } + public static void setFieldsIntoOldStats( + MPartitionColumnStatistics mStatsObj, + MPartitionColumnStatistics oldStatsObj) { + if (mStatsObj.getLongHighValue() != null) { + oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue()); + } + if (mStatsObj.getLongLowValue() != null) { + oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue()); + } + if (mStatsObj.getDoubleLowValue() != null) { + oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue()); + } + if (mStatsObj.getDoubleHighValue() != null) { + oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue()); + } + if (mStatsObj.getDecimalLowValue() != null) { + oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue()); + } + if (mStatsObj.getDecimalHighValue() != null) { + oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue()); + } + if (mStatsObj.getAvgColLen() != null) + oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); + if (mStatsObj.getMaxColLen() != null) + oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen()); + if (mStatsObj.getNumDVs() != null) + oldStatsObj.setNumDVs(mStatsObj.getNumDVs()); + if (mStatsObj.getNumFalses() != null) + oldStatsObj.setNumFalses(mStatsObj.getNumFalses()); + if (mStatsObj.getNumTrues() != null) + oldStatsObj.setNumTrues(mStatsObj.getNumTrues()); + if (mStatsObj.getNumNulls() != null) + oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); + oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed()); + } - public static ColumnStatisticsObj getTableColumnStatisticsObj( - MTableColumnStatistics mStatsObj) { - ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); - statsObj.setColType(mStatsObj.getColType()); - statsObj.setColName(mStatsObj.getColName()); - String colType = mStatsObj.getColType().toLowerCase(); - ColumnStatisticsData colStatsData = new ColumnStatisticsData(); + public static ColumnStatisticsObj getTableColumnStatisticsObj( + MTableColumnStatistics mStatsObj) { + ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); + statsObj.setColType(mStatsObj.getColType()); + statsObj.setColName(mStatsObj.getColName()); + String colType = mStatsObj.getColType().toLowerCase(); + ColumnStatisticsData colStatsData = new ColumnStatisticsData(); - if (colType.equals("boolean")) { - BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses(mStatsObj.getNumFalses()); - boolStats.setNumTrues(mStatsObj.getNumTrues()); - boolStats.setNumNulls(mStatsObj.getNumNulls()); - colStatsData.setBooleanStats(boolStats); - } else if (colType.equals("string") || - colType.startsWith("varchar") || colType.startsWith("char")) { - StringColumnStatsData stringStats = new StringColumnStatsData(); - stringStats.setNumNulls(mStatsObj.getNumNulls()); - stringStats.setAvgColLen(mStatsObj.getAvgColLen()); - stringStats.setMaxColLen(mStatsObj.getMaxColLen()); - 
stringStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setStringStats(stringStats); - } else if (colType.equals("binary")) { - BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls(mStatsObj.getNumNulls()); - binaryStats.setAvgColLen(mStatsObj.getAvgColLen()); - binaryStats.setMaxColLen(mStatsObj.getMaxColLen()); - colStatsData.setBinaryStats(binaryStats); - } else if (colType.equals("bigint") || colType.equals("int") || - colType.equals("smallint") || colType.equals("tinyint") || - colType.equals("timestamp")) { - LongColumnStatsData longStats = new LongColumnStatsData(); - longStats.setNumNulls(mStatsObj.getNumNulls()); - Long longHighValue = mStatsObj.getLongHighValue(); - if (longHighValue != null) { - longStats.setHighValue(longHighValue); - } - Long longLowValue = mStatsObj.getLongLowValue(); - if (longLowValue != null) { - longStats.setLowValue(longLowValue); - } - longStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setLongStats(longStats); - } else if (colType.equals("double") || colType.equals("float")) { - DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); - doubleStats.setNumNulls(mStatsObj.getNumNulls()); - Double doubleHighValue = mStatsObj.getDoubleHighValue(); - if (doubleHighValue != null) { - doubleStats.setHighValue(doubleHighValue); - } - Double doubleLowValue = mStatsObj.getDoubleLowValue(); - if (doubleLowValue != null) { - doubleStats.setLowValue(doubleLowValue); - } - doubleStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setDoubleStats(doubleStats); - } else if (colType.startsWith("decimal")) { - DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); - decimalStats.setNumNulls(mStatsObj.getNumNulls()); - String decimalHighValue = mStatsObj.getDecimalHighValue(); - if (decimalHighValue != null) { - decimalStats.setHighValue(createThriftDecimal(decimalHighValue)); - } - String decimalLowValue = mStatsObj.getDecimalLowValue(); - if (decimalLowValue != null) { - decimalStats.setLowValue(createThriftDecimal(decimalLowValue)); - } - decimalStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setDecimalStats(decimalStats); - } - statsObj.setStatsData(colStatsData); - return statsObj; - } + if (colType.equals("boolean")) { + BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); + boolStats.setNumFalses(mStatsObj.getNumFalses()); + boolStats.setNumTrues(mStatsObj.getNumTrues()); + boolStats.setNumNulls(mStatsObj.getNumNulls()); + colStatsData.setBooleanStats(boolStats); + } else if (colType.equals("string") || colType.startsWith("varchar") + || colType.startsWith("char")) { + StringColumnStatsData stringStats = new StringColumnStatsData(); + stringStats.setNumNulls(mStatsObj.getNumNulls()); + stringStats.setAvgColLen(mStatsObj.getAvgColLen()); + stringStats.setMaxColLen(mStatsObj.getMaxColLen()); + stringStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setStringStats(stringStats); + } else if (colType.equals("binary")) { + BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); + binaryStats.setNumNulls(mStatsObj.getNumNulls()); + binaryStats.setAvgColLen(mStatsObj.getAvgColLen()); + binaryStats.setMaxColLen(mStatsObj.getMaxColLen()); + colStatsData.setBinaryStats(binaryStats); + } else if (colType.equals("bigint") || colType.equals("int") + || colType.equals("smallint") || colType.equals("tinyint") + || colType.equals("timestamp")) { + LongColumnStatsData longStats = new LongColumnStatsData(); + longStats.setNumNulls(mStatsObj.getNumNulls()); + Long longHighValue = 
mStatsObj.getLongHighValue(); + if (longHighValue != null) { + longStats.setHighValue(longHighValue); + } + Long longLowValue = mStatsObj.getLongLowValue(); + if (longLowValue != null) { + longStats.setLowValue(longLowValue); + } + longStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setLongStats(longStats); + } else if (colType.equals("double") || colType.equals("float")) { + DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); + doubleStats.setNumNulls(mStatsObj.getNumNulls()); + Double doubleHighValue = mStatsObj.getDoubleHighValue(); + if (doubleHighValue != null) { + doubleStats.setHighValue(doubleHighValue); + } + Double doubleLowValue = mStatsObj.getDoubleLowValue(); + if (doubleLowValue != null) { + doubleStats.setLowValue(doubleLowValue); + } + doubleStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setDoubleStats(doubleStats); + } else if (colType.startsWith("decimal")) { + DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); + decimalStats.setNumNulls(mStatsObj.getNumNulls()); + String decimalHighValue = mStatsObj.getDecimalHighValue(); + if (decimalHighValue != null) { + decimalStats + .setHighValue(createThriftDecimal(decimalHighValue)); + } + String decimalLowValue = mStatsObj.getDecimalLowValue(); + if (decimalLowValue != null) { + decimalStats.setLowValue(createThriftDecimal(decimalLowValue)); + } + decimalStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setDecimalStats(decimalStats); + } + statsObj.setStatsData(colStatsData); + return statsObj; + } - public static ColumnStatisticsDesc getTableColumnStatisticsDesc( - MTableColumnStatistics mStatsObj) { - ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); - statsDesc.setIsTblLevel(true); - statsDesc.setDbName(mStatsObj.getDbName()); - statsDesc.setTableName(mStatsObj.getTableName()); - statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); - return statsDesc; - } + public static ColumnStatisticsDesc getTableColumnStatisticsDesc( + MTableColumnStatistics mStatsObj) { + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setIsTblLevel(true); + statsDesc.setDbName(mStatsObj.getDbName()); + statsDesc.setTableName(mStatsObj.getTableName()); + statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); + return statsDesc; + } - public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( - MPartition partition, ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj) - throws MetaException, NoSuchObjectException { - if (statsDesc == null || statsObj == null) { - return null; - } + public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( + MPartition partition, ColumnStatisticsDesc statsDesc, + ColumnStatisticsObj statsObj) throws MetaException, + NoSuchObjectException { + if (statsDesc == null || statsObj == null) { + return null; + } - MPartitionColumnStatistics mColStats = new MPartitionColumnStatistics(); - mColStats.setPartition(partition); - mColStats.setDbName(statsDesc.getDbName()); - mColStats.setTableName(statsDesc.getTableName()); - mColStats.setPartitionName(statsDesc.getPartName()); - mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); - mColStats.setColName(statsObj.getColName()); - mColStats.setColType(statsObj.getColType()); + MPartitionColumnStatistics mColStats = new MPartitionColumnStatistics(); + mColStats.setPartition(partition); + mColStats.setDbName(statsDesc.getDbName()); + mColStats.setTableName(statsDesc.getTableName()); + mColStats.setPartitionName(statsDesc.getPartName()); + 
mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); + mColStats.setColName(statsObj.getColName()); + mColStats.setColType(statsObj.getColType()); - if (statsObj.getStatsData().isSetBooleanStats()) { - BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats(); - mColStats.setBooleanStats(boolStats.getNumTrues(), boolStats.getNumFalses(), - boolStats.getNumNulls()); - } else if (statsObj.getStatsData().isSetLongStats()) { - LongColumnStatsData longStats = statsObj.getStatsData().getLongStats(); - mColStats.setLongStats(longStats.getNumNulls(), longStats.getNumDVs(), - longStats.isSetLowValue() ? longStats.getLowValue() : null, - longStats.isSetHighValue() ? longStats.getHighValue() : null); - } else if (statsObj.getStatsData().isSetDoubleStats()) { - DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats(); - mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats.getNumDVs(), - doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null, - doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null); - } else if (statsObj.getStatsData().isSetDecimalStats()) { - DecimalColumnStatsData decimalStats = statsObj.getStatsData().getDecimalStats(); - String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null; - String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null; - mColStats.setDecimalStats(decimalStats.getNumNulls(), decimalStats.getNumDVs(), low, high); - } else if (statsObj.getStatsData().isSetStringStats()) { - StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats(); - mColStats.setStringStats(stringStats.getNumNulls(), stringStats.getNumDVs(), - stringStats.getMaxColLen(), stringStats.getAvgColLen()); - } else if (statsObj.getStatsData().isSetBinaryStats()) { - BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats(); - mColStats.setBinaryStats(binaryStats.getNumNulls(), binaryStats.getMaxColLen(), - binaryStats.getAvgColLen()); - } - return mColStats; - } + if (statsObj.getStatsData().isSetBooleanStats()) { + BooleanColumnStatsData boolStats = statsObj.getStatsData() + .getBooleanStats(); + mColStats.setBooleanStats(boolStats.getNumTrues(), + boolStats.getNumFalses(), boolStats.getNumNulls()); + } else if (statsObj.getStatsData().isSetLongStats()) { + LongColumnStatsData longStats = statsObj.getStatsData() + .getLongStats(); + mColStats.setLongStats(longStats.getNumNulls(), longStats + .getNumDVs(), + longStats.isSetLowValue() ? longStats.getLowValue() : null, + longStats.isSetHighValue() ? longStats.getHighValue() + : null); + } else if (statsObj.getStatsData().isSetDoubleStats()) { + DoubleColumnStatsData doubleStats = statsObj.getStatsData() + .getDoubleStats(); + mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats + .getNumDVs(), + doubleStats.isSetLowValue() ? doubleStats.getLowValue() + : null, + doubleStats.isSetHighValue() ? doubleStats.getHighValue() + : null); + } else if (statsObj.getStatsData().isSetDecimalStats()) { + DecimalColumnStatsData decimalStats = statsObj.getStatsData() + .getDecimalStats(); + String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats + .getLowValue()) : null; + String high = decimalStats.isSetHighValue() ? 
createJdoDecimalString(decimalStats + .getHighValue()) : null; + mColStats.setDecimalStats(decimalStats.getNumNulls(), + decimalStats.getNumDVs(), low, high); + } else if (statsObj.getStatsData().isSetStringStats()) { + StringColumnStatsData stringStats = statsObj.getStatsData() + .getStringStats(); + mColStats.setStringStats(stringStats.getNumNulls(), + stringStats.getNumDVs(), stringStats.getMaxColLen(), + stringStats.getAvgColLen()); + } else if (statsObj.getStatsData().isSetBinaryStats()) { + BinaryColumnStatsData binaryStats = statsObj.getStatsData() + .getBinaryStats(); + mColStats.setBinaryStats(binaryStats.getNumNulls(), + binaryStats.getMaxColLen(), binaryStats.getAvgColLen()); + } + return mColStats; + } - public static ColumnStatisticsObj getPartitionColumnStatisticsObj( - MPartitionColumnStatistics mStatsObj) { - ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); - statsObj.setColType(mStatsObj.getColType()); - statsObj.setColName(mStatsObj.getColName()); - String colType = mStatsObj.getColType().toLowerCase(); - ColumnStatisticsData colStatsData = new ColumnStatisticsData(); + public static ColumnStatisticsObj getPartitionColumnStatisticsObj( + MPartitionColumnStatistics mStatsObj) { + ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); + statsObj.setColType(mStatsObj.getColType()); + statsObj.setColName(mStatsObj.getColName()); + String colType = mStatsObj.getColType().toLowerCase(); + ColumnStatisticsData colStatsData = new ColumnStatisticsData(); - if (colType.equals("boolean")) { - BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses(mStatsObj.getNumFalses()); - boolStats.setNumTrues(mStatsObj.getNumTrues()); - boolStats.setNumNulls(mStatsObj.getNumNulls()); - colStatsData.setBooleanStats(boolStats); - } else if (colType.equals("string") || - colType.startsWith("varchar") || colType.startsWith("char")) { - StringColumnStatsData stringStats = new StringColumnStatsData(); - stringStats.setNumNulls(mStatsObj.getNumNulls()); - stringStats.setAvgColLen(mStatsObj.getAvgColLen()); - stringStats.setMaxColLen(mStatsObj.getMaxColLen()); - stringStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setStringStats(stringStats); - } else if (colType.equals("binary")) { - BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls(mStatsObj.getNumNulls()); - binaryStats.setAvgColLen(mStatsObj.getAvgColLen()); - binaryStats.setMaxColLen(mStatsObj.getMaxColLen()); - colStatsData.setBinaryStats(binaryStats); - } else if (colType.equals("tinyint") || colType.equals("smallint") || - colType.equals("int") || colType.equals("bigint") || - colType.equals("timestamp")) { - LongColumnStatsData longStats = new LongColumnStatsData(); - longStats.setNumNulls(mStatsObj.getNumNulls()); - if (mStatsObj.getLongHighValue() != null) { - longStats.setHighValue(mStatsObj.getLongHighValue()); - } - if (mStatsObj.getLongLowValue() != null) { - longStats.setLowValue(mStatsObj.getLongLowValue()); - } - longStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setLongStats(longStats); - } else if (colType.equals("double") || colType.equals("float")) { - DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); - doubleStats.setNumNulls(mStatsObj.getNumNulls()); - if (mStatsObj.getDoubleHighValue() != null) { - doubleStats.setHighValue(mStatsObj.getDoubleHighValue()); - } - if (mStatsObj.getDoubleLowValue() != null) { - doubleStats.setLowValue(mStatsObj.getDoubleLowValue()); - } - 
doubleStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setDoubleStats(doubleStats); - } else if (colType.startsWith("decimal")) { - DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); - decimalStats.setNumNulls(mStatsObj.getNumNulls()); - if (mStatsObj.getDecimalHighValue() != null) { - decimalStats.setHighValue(createThriftDecimal(mStatsObj.getDecimalHighValue())); - } - if (mStatsObj.getDecimalLowValue() != null) { - decimalStats.setLowValue(createThriftDecimal(mStatsObj.getDecimalLowValue())); - } - decimalStats.setNumDVs(mStatsObj.getNumDVs()); - colStatsData.setDecimalStats(decimalStats); - } - statsObj.setStatsData(colStatsData); - return statsObj; - } + if (colType.equals("boolean")) { + BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); + boolStats.setNumFalses(mStatsObj.getNumFalses()); + boolStats.setNumTrues(mStatsObj.getNumTrues()); + boolStats.setNumNulls(mStatsObj.getNumNulls()); + colStatsData.setBooleanStats(boolStats); + } else if (colType.equals("string") || colType.startsWith("varchar") + || colType.startsWith("char")) { + StringColumnStatsData stringStats = new StringColumnStatsData(); + stringStats.setNumNulls(mStatsObj.getNumNulls()); + stringStats.setAvgColLen(mStatsObj.getAvgColLen()); + stringStats.setMaxColLen(mStatsObj.getMaxColLen()); + stringStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setStringStats(stringStats); + } else if (colType.equals("binary")) { + BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); + binaryStats.setNumNulls(mStatsObj.getNumNulls()); + binaryStats.setAvgColLen(mStatsObj.getAvgColLen()); + binaryStats.setMaxColLen(mStatsObj.getMaxColLen()); + colStatsData.setBinaryStats(binaryStats); + } else if (colType.equals("tinyint") || colType.equals("smallint") + || colType.equals("int") || colType.equals("bigint") + || colType.equals("timestamp")) { + LongColumnStatsData longStats = new LongColumnStatsData(); + longStats.setNumNulls(mStatsObj.getNumNulls()); + if (mStatsObj.getLongHighValue() != null) { + longStats.setHighValue(mStatsObj.getLongHighValue()); + } + if (mStatsObj.getLongLowValue() != null) { + longStats.setLowValue(mStatsObj.getLongLowValue()); + } + longStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setLongStats(longStats); + } else if (colType.equals("double") || colType.equals("float")) { + DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); + doubleStats.setNumNulls(mStatsObj.getNumNulls()); + if (mStatsObj.getDoubleHighValue() != null) { + doubleStats.setHighValue(mStatsObj.getDoubleHighValue()); + } + if (mStatsObj.getDoubleLowValue() != null) { + doubleStats.setLowValue(mStatsObj.getDoubleLowValue()); + } + doubleStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setDoubleStats(doubleStats); + } else if (colType.startsWith("decimal")) { + DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); + decimalStats.setNumNulls(mStatsObj.getNumNulls()); + if (mStatsObj.getDecimalHighValue() != null) { + decimalStats.setHighValue(createThriftDecimal(mStatsObj + .getDecimalHighValue())); + } + if (mStatsObj.getDecimalLowValue() != null) { + decimalStats.setLowValue(createThriftDecimal(mStatsObj + .getDecimalLowValue())); + } + decimalStats.setNumDVs(mStatsObj.getNumDVs()); + colStatsData.setDecimalStats(decimalStats); + } + statsObj.setStatsData(colStatsData); + return statsObj; + } - public static ColumnStatisticsDesc getPartitionColumnStatisticsDesc( - MPartitionColumnStatistics mStatsObj) { - ColumnStatisticsDesc statsDesc = new 
ColumnStatisticsDesc(); - statsDesc.setIsTblLevel(false); - statsDesc.setDbName(mStatsObj.getDbName()); - statsDesc.setTableName(mStatsObj.getTableName()); - statsDesc.setPartName(mStatsObj.getPartitionName()); - statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); - return statsDesc; - } + public static ColumnStatisticsDesc getPartitionColumnStatisticsDesc( + MPartitionColumnStatistics mStatsObj) { + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setIsTblLevel(false); + statsDesc.setDbName(mStatsObj.getDbName()); + statsDesc.setTableName(mStatsObj.getTableName()); + statsDesc.setPartName(mStatsObj.getPartitionName()); + statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); + return statsDesc; + } - // SQL - public static void fillColumnStatisticsData(String colType, ColumnStatisticsData data, - Object llow, Object lhigh, Object dlow, Object dhigh, Object declow, Object dechigh, - Object nulls, Object dist, Object avglen, Object maxlen, Object trues, Object falses) { - if (colType.equals("boolean")) { - BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses((Long)falses); - boolStats.setNumTrues((Long)trues); - boolStats.setNumNulls((Long)nulls); - data.setBooleanStats(boolStats); - } else if (colType.equals("string") || - colType.startsWith("varchar") || colType.startsWith("char")) { - StringColumnStatsData stringStats = new StringColumnStatsData(); - stringStats.setNumNulls((Long)nulls); - stringStats.setAvgColLen((Double)avglen); - stringStats.setMaxColLen((Long)maxlen); - stringStats.setNumDVs((Long)dist); - data.setStringStats(stringStats); - } else if (colType.equals("binary")) { - BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls((Long)nulls); - binaryStats.setAvgColLen((Double)avglen); - binaryStats.setMaxColLen((Long)maxlen); - data.setBinaryStats(binaryStats); - } else if (colType.equals("bigint") || colType.equals("int") || - colType.equals("smallint") || colType.equals("tinyint") || - colType.equals("timestamp")) { - LongColumnStatsData longStats = new LongColumnStatsData(); - longStats.setNumNulls((Long)nulls); - if (lhigh != null) { - longStats.setHighValue((Long)lhigh); - } - if (llow != null) { - longStats.setLowValue((Long)llow); - } - longStats.setNumDVs((Long)dist); - data.setLongStats(longStats); - } else if (colType.equals("double") || colType.equals("float")) { - DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); - doubleStats.setNumNulls((Long)nulls); - if (dhigh != null) { - doubleStats.setHighValue((Double)dhigh); - } - if (dlow != null) { - doubleStats.setLowValue((Double)dlow); - } - doubleStats.setNumDVs((Long)dist); - data.setDoubleStats(doubleStats); - } else if (colType.startsWith("decimal")) { - DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); - decimalStats.setNumNulls((Long)nulls); - if (dechigh != null) { - decimalStats.setHighValue(createThriftDecimal((String)dechigh)); - } - if (declow != null) { - decimalStats.setLowValue(createThriftDecimal((String)declow)); - } - decimalStats.setNumDVs((Long)dist); - data.setDecimalStats(decimalStats); - } - } + // SQL + public static void fillColumnStatisticsData(String colType, + ColumnStatisticsData data, Object llow, Object lhigh, Object dlow, + Object dhigh, Object declow, Object dechigh, Object nulls, + Object dist, Object avglen, Object maxlen, Object trues, + Object falses) { + if (colType.equals("boolean")) { + BooleanColumnStatsData boolStats = new 
BooleanColumnStatsData(); + boolStats.setNumFalses((Long) falses); + boolStats.setNumTrues((Long) trues); + boolStats.setNumNulls((Long) nulls); + data.setBooleanStats(boolStats); + } else if (colType.equals("string") || colType.startsWith("varchar") + || colType.startsWith("char")) { + StringColumnStatsData stringStats = new StringColumnStatsData(); + stringStats.setNumNulls((Long) nulls); + stringStats.setAvgColLen((Double) avglen); + stringStats.setMaxColLen((Long) maxlen); + stringStats.setNumDVs((Long) dist); + data.setStringStats(stringStats); + } else if (colType.equals("binary")) { + BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); + binaryStats.setNumNulls((Long) nulls); + binaryStats.setAvgColLen((Double) avglen); + binaryStats.setMaxColLen((Long) maxlen); + data.setBinaryStats(binaryStats); + } else if (colType.equals("bigint") || colType.equals("int") + || colType.equals("smallint") || colType.equals("tinyint") + || colType.equals("timestamp")) { + LongColumnStatsData longStats = new LongColumnStatsData(); + longStats.setNumNulls((Long) nulls); + if (lhigh != null) { + longStats.setHighValue((Long) lhigh); + } + if (llow != null) { + longStats.setLowValue((Long) llow); + } + longStats.setNumDVs((Long) dist); + data.setLongStats(longStats); + } else if (colType.equals("double") || colType.equals("float")) { + DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); + doubleStats.setNumNulls((Long) nulls); + if (dhigh != null) { + doubleStats.setHighValue((Double) dhigh); + } + if (dlow != null) { + doubleStats.setLowValue((Double) dlow); + } + doubleStats.setNumDVs((Long) dist); + data.setDoubleStats(doubleStats); + } else if (colType.startsWith("decimal")) { + DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); + decimalStats.setNumNulls((Long) nulls); + if (dechigh != null) { + decimalStats + .setHighValue(createThriftDecimal((String) dechigh)); + } + if (declow != null) { + decimalStats.setLowValue(createThriftDecimal((String) declow)); + } + decimalStats.setNumDVs((Long) dist); + data.setDecimalStats(decimalStats); + } + } - private static Decimal createThriftDecimal(String s) { - BigDecimal d = new BigDecimal(s); - return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), (short)d.scale()); - } + private static Decimal createThriftDecimal(String s) { + BigDecimal d = new BigDecimal(s); + return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), + (short) d.scale()); + } - private static String createJdoDecimalString(Decimal d) { - return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale()).toString(); - } + private static String createJdoDecimalString(Decimal d) { + return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale()) + .toString(); + } } diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java index 89c31dc..c6a33a6 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java @@ -46,13 +46,13 @@ private Double doubleHighValue; private String decimalLowValue; private String decimalHighValue; - private long numNulls; - private long numDVs; - private double avgColLen; - private long maxColLen; - private long numTrues; - private long numFalses; - private long lastAnalyzed; + private Long numNulls; + private Long numDVs; + 
private Double avgColLen; + private Long maxColLen; + private Long numTrues; + private Long numFalses; + private Long lastAnalyzed; public MPartitionColumnStatistics() {} @@ -72,7 +72,7 @@ public void setColName(String colName) { this.colName = colName; } - public long getNumNulls() { + public Long getNumNulls() { return numNulls; } @@ -80,7 +80,7 @@ public void setNumNulls(long numNulls) { this.numNulls = numNulls; } - public long getNumDVs() { + public Long getNumDVs() { return numDVs; } @@ -88,7 +88,7 @@ public void setNumDVs(long numDVs) { this.numDVs = numDVs; } - public double getAvgColLen() { + public Double getAvgColLen() { return avgColLen; } @@ -96,7 +96,7 @@ public void setAvgColLen(double avgColLen) { this.avgColLen = avgColLen; } - public long getMaxColLen() { + public Long getMaxColLen() { return maxColLen; } @@ -104,7 +104,7 @@ public void setMaxColLen(long maxColLen) { this.maxColLen = maxColLen; } - public long getNumTrues() { + public Long getNumTrues() { return numTrues; } @@ -112,7 +112,7 @@ public void setNumTrues(long numTrues) { this.numTrues = numTrues; } - public long getNumFalses() { + public Long getNumFalses() { return numFalses; } @@ -120,7 +120,7 @@ public void setNumFalses(long numFalses) { this.numFalses = numFalses; } - public long getLastAnalyzed() { + public Long getLastAnalyzed() { return lastAnalyzed; } @@ -160,20 +160,20 @@ public void setColType(String colType) { this.colType = colType; } - public void setBooleanStats(long numTrues, long numFalses, long numNulls) { + public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) { this.numTrues = numTrues; this.numFalses = numFalses; this.numNulls = numNulls; } - public void setLongStats(long numNulls, long numNDVs, Long lowValue, Long highValue) { + public void setLongStats(Long numNulls, Long numNDVs, Long lowValue, Long highValue) { this.numNulls = numNulls; this.numDVs = numNDVs; this.longLowValue = lowValue; this.longHighValue = highValue; } - public void setDoubleStats(long numNulls, long numNDVs, Double lowValue, Double highValue) { + public void setDoubleStats(Long numNulls, Long numNDVs, Double lowValue, Double highValue) { this.numNulls = numNulls; this.numDVs = numNDVs; this.doubleLowValue = lowValue; @@ -181,21 +181,21 @@ public void setDoubleStats(long numNulls, long numNDVs, Double lowValue, Double } public void setDecimalStats( - long numNulls, long numNDVs, String lowValue, String highValue) { + Long numNulls, Long numNDVs, String lowValue, String highValue) { this.numNulls = numNulls; this.numDVs = numNDVs; this.decimalLowValue = lowValue; this.decimalHighValue = highValue; } - public void setStringStats(long numNulls, long numNDVs, long maxColLen, double avgColLen) { + public void setStringStats(Long numNulls, Long numNDVs, Long maxColLen, Double avgColLen) { this.numNulls = numNulls; this.numDVs = numNDVs; this.maxColLen = maxColLen; this.avgColLen = avgColLen; } - public void setBinaryStats(long numNulls, long maxColLen, double avgColLen) { + public void setBinaryStats(Long numNulls, Long maxColLen, Double avgColLen) { this.numNulls = numNulls; this.maxColLen = maxColLen; this.avgColLen = avgColLen; diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java index 44bbab5..98e77ad 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java +++ 
metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java @@ -21,222 +21,223 @@ */ package org.apache.hadoop.hive.metastore.model; - - /** * - * MTableColumnStatistics - Represents Hive's Column Statistics Description. The fields in this - * class with the exception of table are persisted in the metastore. In case of table, tbl_id is - * persisted in its place. + * MTableColumnStatistics - Represents Hive's Column Statistics Description. The + * fields in this class with the exception of table are persisted in the + * metastore. In case of table, tbl_id is persisted in its place. * */ public class MTableColumnStatistics { - private MTable table; - private String dbName; - private String tableName; - private String colName; - private String colType; - - private Long longLowValue; - private Long longHighValue; - private Double doubleLowValue; - private Double doubleHighValue; - private String decimalLowValue; - private String decimalHighValue; - private long numNulls; - private long numDVs; - private double avgColLen; - private long maxColLen; - private long numTrues; - private long numFalses; - private long lastAnalyzed; - - public MTableColumnStatistics() {} - - public MTable getTable() { - return table; - } - - public void setTable(MTable table) { - this.table = table; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public String getColName() { - return colName; - } - - public void setColName(String colName) { - this.colName = colName; - } - - public String getColType() { - return colType; - } - - public void setColType(String colType) { - this.colType = colType; - } - - public long getNumNulls() { - return numNulls; - } - - public void setNumNulls(long numNulls) { - this.numNulls = numNulls; - } - - public long getNumDVs() { - return numDVs; - } - - public void setNumDVs(long numDVs) { - this.numDVs = numDVs; - } - - public double getAvgColLen() { - return avgColLen; - } - - public void setAvgColLen(double avgColLen) { - this.avgColLen = avgColLen; - } - - public long getMaxColLen() { - return maxColLen; - } - - public void setMaxColLen(long maxColLen) { - this.maxColLen = maxColLen; - } - - public long getNumTrues() { - return numTrues; - } - - public void setNumTrues(long numTrues) { - this.numTrues = numTrues; - } - - public long getNumFalses() { - return numFalses; - } - - public void setNumFalses(long numFalses) { - this.numFalses = numFalses; - } - - public long getLastAnalyzed() { - return lastAnalyzed; - } - - public void setLastAnalyzed(long lastAnalyzed) { - this.lastAnalyzed = lastAnalyzed; - } - - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } - - public void setBooleanStats(long numTrues, long numFalses, long numNulls) { - this.numTrues = numTrues; - this.numFalses = numFalses; - this.numNulls = numNulls; - } - - public void setLongStats(long numNulls, long numNDVs, Long lowValue, Long highValue) { - this.numNulls = numNulls; - this.numDVs = numNDVs; - this.longLowValue = lowValue; - this.longHighValue = highValue; - } - - public void setDoubleStats(long numNulls, long numNDVs, Double lowValue, Double highValue) { - this.numNulls = numNulls; - this.numDVs = numNDVs; - this.doubleLowValue = lowValue; - this.doubleHighValue = highValue; - } - - public void setDecimalStats( - long numNulls, long numNDVs, String lowValue, String highValue) { - this.numNulls = numNulls; - 
this.numDVs = numNDVs; - this.decimalLowValue = lowValue; - this.decimalHighValue = highValue; - } - - public void setStringStats(long numNulls, long numNDVs, long maxColLen, double avgColLen) { - this.numNulls = numNulls; - this.numDVs = numNDVs; - this.maxColLen = maxColLen; - this.avgColLen = avgColLen; - } - - public void setBinaryStats(long numNulls, long maxColLen, double avgColLen) { - this.numNulls = numNulls; - this.maxColLen = maxColLen; - this.avgColLen = avgColLen; - } - - public Long getLongLowValue() { - return longLowValue; - } - - public void setLongLowValue(long longLowValue) { - this.longLowValue = longLowValue; - } - - public Long getLongHighValue() { - return longHighValue; - } - - public void setLongHighValue(long longHighValue) { - this.longHighValue = longHighValue; - } - - public Double getDoubleLowValue() { - return doubleLowValue; - } - - public void setDoubleLowValue(double doubleLowValue) { - this.doubleLowValue = doubleLowValue; - } - - public Double getDoubleHighValue() { - return doubleHighValue; - } - - public void setDoubleHighValue(double doubleHighValue) { - this.doubleHighValue = doubleHighValue; - } - - - public String getDecimalLowValue() { - return decimalLowValue; - } - - public void setDecimalLowValue(String decimalLowValue) { - this.decimalLowValue = decimalLowValue; - } - - public String getDecimalHighValue() { - return decimalHighValue; - } - - public void setDecimalHighValue(String decimalHighValue) { - this.decimalHighValue = decimalHighValue; - } + private MTable table; + private String dbName; + private String tableName; + private String colName; + private String colType; + + private Long longLowValue; + private Long longHighValue; + private Double doubleLowValue; + private Double doubleHighValue; + private String decimalLowValue; + private String decimalHighValue; + private Long numNulls; + private Long numDVs; + private Double avgColLen; + private Long maxColLen; + private Long numTrues; + private Long numFalses; + private Long lastAnalyzed; + + public MTableColumnStatistics() { + } + + public MTable getTable() { + return table; + } + + public void setTable(MTable table) { + this.table = table; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public String getColName() { + return colName; + } + + public void setColName(String colName) { + this.colName = colName; + } + + public String getColType() { + return colType; + } + + public void setColType(String colType) { + this.colType = colType; + } + + public Long getNumNulls() { + return numNulls; + } + + public void setNumNulls(long numNulls) { + this.numNulls = numNulls; + } + + public Long getNumDVs() { + return numDVs; + } + + public void setNumDVs(long numDVs) { + this.numDVs = numDVs; + } + + public Double getAvgColLen() { + return avgColLen; + } + + public void setAvgColLen(double avgColLen) { + this.avgColLen = avgColLen; + } + + public Long getMaxColLen() { + return maxColLen; + } + + public void setMaxColLen(long maxColLen) { + this.maxColLen = maxColLen; + } + + public Long getNumTrues() { + return numTrues; + } + + public void setNumTrues(long numTrues) { + this.numTrues = numTrues; + } + + public Long getNumFalses() { + return numFalses; + } + + public void setNumFalses(long numFalses) { + this.numFalses = numFalses; + } + + public Long getLastAnalyzed() { + return lastAnalyzed; + } + + public void setLastAnalyzed(long lastAnalyzed) { + this.lastAnalyzed = lastAnalyzed; + } + + 
public String getDbName() { + return dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) { + this.numTrues = numTrues; + this.numFalses = numFalses; + this.numNulls = numNulls; + } + + public void setLongStats(Long numNulls, Long numNDVs, Long lowValue, + Long highValue) { + this.numNulls = numNulls; + this.numDVs = numNDVs; + this.longLowValue = lowValue; + this.longHighValue = highValue; + } + + public void setDoubleStats(Long numNulls, Long numNDVs, Double lowValue, + Double highValue) { + this.numNulls = numNulls; + this.numDVs = numNDVs; + this.doubleLowValue = lowValue; + this.doubleHighValue = highValue; + } + + public void setDecimalStats(Long numNulls, Long numNDVs, String lowValue, + String highValue) { + this.numNulls = numNulls; + this.numDVs = numNDVs; + this.decimalLowValue = lowValue; + this.decimalHighValue = highValue; + } + + public void setStringStats(Long numNulls, Long numNDVs, Long maxColLen, + Double avgColLen) { + this.numNulls = numNulls; + this.numDVs = numNDVs; + this.maxColLen = maxColLen; + this.avgColLen = avgColLen; + } + + public void setBinaryStats(Long numNulls, Long maxColLen, Double avgColLen) { + this.numNulls = numNulls; + this.maxColLen = maxColLen; + this.avgColLen = avgColLen; + } + + public Long getLongLowValue() { + return longLowValue; + } + + public void setLongLowValue(long longLowValue) { + this.longLowValue = longLowValue; + } + + public Long getLongHighValue() { + return longHighValue; + } + + public void setLongHighValue(long longHighValue) { + this.longHighValue = longHighValue; + } + + public Double getDoubleLowValue() { + return doubleLowValue; + } + + public void setDoubleLowValue(double doubleLowValue) { + this.doubleLowValue = doubleLowValue; + } + + public Double getDoubleHighValue() { + return doubleHighValue; + } + + public void setDoubleHighValue(double doubleHighValue) { + this.doubleHighValue = doubleHighValue; + } + + public String getDecimalLowValue() { + return decimalLowValue; + } + + public void setDecimalLowValue(String decimalLowValue) { + this.decimalLowValue = decimalLowValue; + } + + public String getDecimalHighValue() { + return decimalHighValue; + } + + public void setDecimalHighValue(String decimalHighValue) { + this.decimalHighValue = decimalHighValue; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index ac76214..2d79896 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -115,1528 +115,1689 @@ public class Driver implements CommandProcessor { - static final private String CLASS_NAME = Driver.class.getName(); - static final private Log LOG = LogFactory.getLog(CLASS_NAME); - static final private LogHelper console = new LogHelper(LOG); - - private static final Object compileMonitor = new Object(); - - private int maxRows = 100; - ByteStream.Output bos = new ByteStream.Output(); - - private HiveConf conf; - private DataInput resStream; - private Context ctx; - private DriverContext driverCxt; - private QueryPlan plan; - private Schema schema; - private String errorMessage; - private String SQLState; - private Throwable downstreamError; - private HiveTxnManager txnMgr; - - // A limit on the number of threads that can be launched - private int maxthreads; - private int tryCount = Integer.MAX_VALUE; - - private boolean destroyed; - - private String userName; - - private void 
createTxnManager() throws SemanticException { - if (txnMgr == null) { - try { - txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage(), e); - } - } - } - - private boolean checkConcurrency() throws SemanticException { - boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); - if (!supportConcurrency) { - LOG.info("Concurrency mode is disabled, not creating a lock manager"); - return false; - } - return true; - } - - @Override - public void init() { - Operator.resetId(); - } - - /** - * Return the status information about the Map-Reduce cluster - */ - public ClusterStatus getClusterStatus() throws Exception { - ClusterStatus cs; - try { - JobConf job = new JobConf(conf); - JobClient jc = new JobClient(job); - cs = jc.getClusterStatus(); - } catch (Exception e) { - e.printStackTrace(); - throw e; - } - LOG.info("Returning cluster status: " + cs.toString()); - return cs; - } - - - public Schema getSchema() { - return schema; - } - - /** - * Get a Schema with fields represented with native Hive types - */ - public static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) { - Schema schema = null; - - // If we have a plan, prefer its logical result schema if it's - // available; otherwise, try digging out a fetch task; failing that, - // give up. - if (sem == null) { - // can't get any info without a plan - } else if (sem.getResultSchema() != null) { - List lst = sem.getResultSchema(); - schema = new Schema(lst, null); - } else if (sem.getFetchTask() != null) { - FetchTask ft = sem.getFetchTask(); - TableDesc td = ft.getTblDesc(); - // partitioned tables don't have tableDesc set on the FetchTask. Instead - // they have a list of PartitionDesc objects, each with a table desc. - // Let's - // try to fetch the desc for the first partition and use it's - // deserializer. 
- if (td == null && ft.getWork() != null && ft.getWork().getPartDesc() != null) { - if (ft.getWork().getPartDesc().size() > 0) { - td = ft.getWork().getPartDesc().get(0).getTableDesc(); - } - } - - if (td == null) { - LOG.info("No returning schema."); - } else { - String tableName = "result"; - List lst = null; - try { - lst = MetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer()); - } catch (Exception e) { - LOG.warn("Error getting schema: " - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - } - if (lst != null) { - schema = new Schema(lst, null); - } - } - } - if (schema == null) { - schema = new Schema(); - } - LOG.info("Returning Hive schema: " + schema); - return schema; - } - - /** - * Get a Schema with fields represented with Thrift DDL types - */ - public Schema getThriftSchema() throws Exception { - Schema schema; - try { - schema = getSchema(); - if (schema != null) { - List lst = schema.getFieldSchemas(); - // Go over the schema and convert type to thrift type - if (lst != null) { - for (FieldSchema f : lst) { - f.setType(MetaStoreUtils.typeToThriftType(f.getType())); - } - } - } - } catch (Exception e) { - e.printStackTrace(); - throw e; - } - LOG.info("Returning Thrift schema: " + schema); - return schema; - } - - /** - * Return the maximum number of rows returned by getResults - */ - public int getMaxRows() { - return maxRows; - } - - /** - * Set the maximum number of rows returned by getResults - */ - public void setMaxRows(int maxRows) { - this.maxRows = maxRows; - } - - /** - * for backwards compatibility with current tests - */ - public Driver(HiveConf conf) { - this.conf = conf; - } - - public Driver(HiveConf conf, String userName) { - this(conf); - this.userName = userName; - } - - public Driver() { - if (SessionState.get() != null) { - conf = SessionState.get().getConf(); - } - } - - /** - * Compile a new query. Any currently-planned query associated with this Driver is discarded. - * Do not reset id for inner queries(index, etc). Task ids are used for task identity check. - * - * @param command - * The SQL query to compile. - */ - public int compile(String command) { - return compile(command, true); - } - - /** - * Hold state variables specific to each query being executed, that may not - * be consistent in the overall SessionState - */ - private static class QueryState { - private HiveOperation op; - private String cmd; - private boolean init = false; - - /** - * Initialize the queryState with the query state variables - */ - public void init(HiveOperation op, String cmd) { - this.op = op; - this.cmd = cmd; - this.init = true; - } - - public boolean isInitialized() { - return this.init; - } - - public HiveOperation getOp() { - return this.op; - } - - public String getCmd() { - return this.cmd; - } - } - - public void saveSession(QueryState qs) { - SessionState oldss = SessionState.get(); - if (oldss != null && oldss.getHiveOperation() != null) { - qs.init(oldss.getHiveOperation(), oldss.getCmd()); - } - } - - public void restoreSession(QueryState qs) { - SessionState ss = SessionState.get(); - if (ss != null && qs != null && qs.isInitialized()) { - ss.setCmd(qs.getCmd()); - ss.setCommandType(qs.getOp()); - } - } - - /** - * Compile a new query, but potentially reset taskID counter. Not resetting task counter - * is useful for generating re-entrant QL queries. - * @param command The HiveQL query to compile - * @param resetTaskIds Resets taskID counter if true. 
- * @return 0 for ok - */ - public int compile(String command, boolean resetTaskIds) { - PerfLogger perfLogger = PerfLogger.getPerfLogger(); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE); - - //holder for parent command type/string when executing reentrant queries - QueryState queryState = new QueryState(); - - if (plan != null) { - close(); - plan = null; - } - - if (resetTaskIds) { - TaskFactory.resetId(); - } - saveSession(queryState); - - // generate new query id - String queryId = QueryPlan.makeQueryId(); - conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); - - try { - command = new VariableSubstitution().substitute(conf,command); - ctx = new Context(conf); - ctx.setTryCount(getTryCount()); - ctx.setCmd(command); - ctx.setHDFSCleanup(true); - - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE); - ParseDriver pd = new ParseDriver(); - ASTNode tree = pd.parse(command, ctx); - tree = ParseUtils.findRootNonNullToken(tree); - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE); - - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE); - BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree); - List saHooks = - getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, - HiveSemanticAnalyzerHook.class); - - // Do semantic analysis and plan generation - if (saHooks != null) { - HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl(); - hookCtx.setConf(conf); - hookCtx.setUserName(userName); - for (HiveSemanticAnalyzerHook hook : saHooks) { - tree = hook.preAnalyze(hookCtx, tree); - } - sem.analyze(tree, ctx); - hookCtx.update(sem); - for (HiveSemanticAnalyzerHook hook : saHooks) { - hook.postAnalyze(hookCtx, sem.getRootTasks()); - } - } else { - sem.analyze(tree, ctx); - } - - LOG.info("Semantic Analysis Completed"); - - // validate the plan - sem.validate(); - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE); - - plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId); - - String queryStr = plan.getQueryStr(); - conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr); - - conf.set("mapreduce.workflow.id", "hive_" + queryId); - conf.set("mapreduce.workflow.name", queryStr); - - // initialize FetchTask right here - if (plan.getFetchTask() != null) { - plan.getFetchTask().initialize(conf, plan, null); - } - - // get the output schema - schema = getSchema(sem, conf); - - //do the authorization check - if (!sem.skipAuthorization() && - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { - - try { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION); - doAuthorization(sem); - } catch (AuthorizationException authExp) { - console.printError("Authorization failed:" + authExp.getMessage() - + ". 
Use SHOW GRANT to get more details."); - errorMessage = authExp.getMessage(); - SQLState = "42000"; - return 403; - } finally { - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION); - } - } - - return 0; - } catch (Exception e) { - ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage()); - errorMessage = "FAILED: " + e.getClass().getSimpleName(); - if (error != ErrorMsg.GENERIC_ERROR) { - errorMessage += " [Error " + error.getErrorCode() + "]:"; - } - - // HIVE-4889 - if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) { - errorMessage += " " + e.getCause().getMessage(); - } else { - errorMessage += " " + e.getMessage(); - } - - SQLState = error.getSQLState(); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return error.getErrorCode(); - } finally { - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE); - restoreSession(queryState); - } - } - - public static void doAuthorization(BaseSemanticAnalyzer sem) - throws HiveException, AuthorizationException { - HashSet inputs = sem.getInputs(); - HashSet outputs = sem.getOutputs(); - SessionState ss = SessionState.get(); - HiveOperation op = ss.getHiveOperation(); - Hive db = sem.getDb(); - if (ss.isAuthorizationModeV2()) { - doAuthorizationV2(ss, op, inputs, outputs); - return; - } - if (op == null) { - throw new HiveException("Operation should not be null"); - } - HiveAuthorizationProvider authorizer = ss.getAuthorizer(); - if (op.equals(HiveOperation.CREATEDATABASE)) { - authorizer.authorize( - op.getInputRequiredPrivileges(), op.getOutputRequiredPrivileges()); - } else if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) - || op.equals(HiveOperation.CREATETABLE)) { - authorizer.authorize( - db.getDatabase(SessionState.get().getCurrentDatabase()), null, - HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges()); - } else { - if (op.equals(HiveOperation.IMPORT)) { - ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem; - if (!isa.existsTable()) { - authorizer.authorize( - db.getDatabase(SessionState.get().getCurrentDatabase()), null, - HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges()); - } - } - } - if (outputs != null && outputs.size() > 0) { - for (WriteEntity write : outputs) { - if (write.isDummy()) { - continue; - } - if (write.getType() == Entity.Type.DATABASE) { - authorizer.authorize(write.getDatabase(), - null, op.getOutputRequiredPrivileges()); - continue; - } - - if (write.getType() == WriteEntity.Type.PARTITION) { - Partition part = db.getPartition(write.getTable(), write - .getPartition().getSpec(), false); - if (part != null) { - authorizer.authorize(write.getPartition(), null, - op.getOutputRequiredPrivileges()); - continue; - } - } - - if (write.getTable() != null) { - authorizer.authorize(write.getTable(), null, - op.getOutputRequiredPrivileges()); - } - } - } - - if (inputs != null && inputs.size() > 0) { - Map> tab2Cols = new HashMap>(); - Map> part2Cols = new HashMap>(); - - //determine if partition level privileges should be checked for input tables - Map tableUsePartLevelAuth = new HashMap(); - for (ReadEntity read : inputs) { - if (read.isDummy() || read.getType() == Entity.Type.DATABASE) { - continue; - } - Table tbl = read.getTable(); - if ((read.getPartition() != null) || (tbl != null && tbl.isPartitioned())) { - String tblName = tbl.getTableName(); - if (tableUsePartLevelAuth.get(tblName) == null) { - boolean usePartLevelPriv = (tbl.getParameters().get( - 
"PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE" - .equalsIgnoreCase(tbl.getParameters().get( - "PARTITION_LEVEL_PRIVILEGE")))); - if (usePartLevelPriv) { - tableUsePartLevelAuth.put(tblName, Boolean.TRUE); - } else { - tableUsePartLevelAuth.put(tblName, Boolean.FALSE); - } - } - } - } - - //for a select or create-as-select query, populate the partition to column (par2Cols) or - // table to columns mapping (tab2Cols) - if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) - || op.equals(HiveOperation.QUERY)) { - SemanticAnalyzer querySem = (SemanticAnalyzer) sem; - ParseContext parseCtx = querySem.getParseContext(); - Map tsoTopMap = parseCtx.getTopToTable(); - - for (Map.Entry> topOpMap : querySem - .getParseContext().getTopOps().entrySet()) { - Operator topOp = topOpMap.getValue(); - if (topOp instanceof TableScanOperator - && tsoTopMap.containsKey(topOp)) { - TableScanOperator tableScanOp = (TableScanOperator) topOp; - Table tbl = tsoTopMap.get(tableScanOp); - List neededColumnIds = tableScanOp.getNeededColumnIDs(); - List columns = tbl.getCols(); - List cols = new ArrayList(); - for (int i = 0; i < neededColumnIds.size(); i++) { - cols.add(columns.get(neededColumnIds.get(i)).getName()); - } - //map may not contain all sources, since input list may have been optimized out - //or non-existent tho such sources may still be referenced by the TableScanOperator - //if it's null then the partition probably doesn't exist so let's use table permission - if (tbl.isPartitioned() && - tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) { - String alias_id = topOpMap.getKey(); - - PrunedPartitionList partsList = PartitionPruner.prune(tableScanOp, - parseCtx, alias_id); - Set parts = partsList.getPartitions(); - for (Partition part : parts) { - List existingCols = part2Cols.get(part); - if (existingCols == null) { - existingCols = new ArrayList(); - } - existingCols.addAll(cols); - part2Cols.put(part, existingCols); - } - } else { - List existingCols = tab2Cols.get(tbl); - if (existingCols == null) { - existingCols = new ArrayList(); - } - existingCols.addAll(cols); - tab2Cols.put(tbl, existingCols); - } - } - } - } - - // cache the results for table authorization - Set tableAuthChecked = new HashSet(); - for (ReadEntity read : inputs) { - if (read.isDummy()) { - continue; - } - if (read.getType() == Entity.Type.DATABASE) { - authorizer.authorize(read.getDatabase(), op.getInputRequiredPrivileges(), null); - continue; - } - Table tbl = read.getTable(); - if (read.getPartition() != null) { - Partition partition = read.getPartition(); - tbl = partition.getTable(); - // use partition level authorization - if (tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) { - List cols = part2Cols.get(partition); - if (cols != null && cols.size() > 0) { - authorizer.authorize(partition.getTable(), - partition, cols, op.getInputRequiredPrivileges(), - null); - } else { - authorizer.authorize(partition, - op.getInputRequiredPrivileges(), null); - } - continue; - } - } - - // if we reach here, it means it needs to do a table authorization - // check, and the table authorization may already happened because of other - // partitions - if (tbl != null && !tableAuthChecked.contains(tbl.getTableName()) && - !(tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE)) { - List cols = tab2Cols.get(tbl); - if (cols != null && cols.size() > 0) { - authorizer.authorize(tbl, null, cols, - op.getInputRequiredPrivileges(), null); - } else { - authorizer.authorize(tbl, op.getInputRequiredPrivileges(), - 
null); - } - tableAuthChecked.add(tbl.getTableName()); - } - } - - } - } - - private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet inputs, - HashSet outputs) throws HiveException { - HiveOperationType hiveOpType = getHiveOperationType(op); - List inputsHObjs = getHivePrivObjects(inputs); - List outputHObjs = getHivePrivObjects(outputs); - ss.getAuthorizerV2().checkPrivileges(hiveOpType, inputsHObjs, outputHObjs); - return; - } - - private static List getHivePrivObjects(HashSet privObjects) { - List hivePrivobjs = new ArrayList(); - if(privObjects == null){ - return hivePrivobjs; - } - for(Entity privObject : privObjects){ - HivePrivilegeObjectType privObjType = - AuthorizationUtils.getHivePrivilegeObjectType(privObject.getType()); - - if(privObject instanceof ReadEntity && !((ReadEntity)privObject).isDirect()){ - // In case of views, the underlying views or tables are not direct dependencies - // and are not used for authorization checks. - // This ReadEntity represents one of the underlying tables/views, so skip it. - // See description of the isDirect in ReadEntity - continue; - } - if(privObject instanceof WriteEntity && ((WriteEntity)privObject).isTempURI()){ - //do not authorize temporary uris - continue; - } - - //support for authorization on partitions needs to be added - String dbname = null; - String tableURI = null; - switch(privObject.getType()){ - case DATABASE: - dbname = privObject.getDatabase() == null ? null : privObject.getDatabase().getName(); - break; - case TABLE: - dbname = privObject.getTable() == null ? null : privObject.getTable().getDbName(); - tableURI = privObject.getTable() == null ? null : privObject.getTable().getTableName(); - break; - case DFS_DIR: - case LOCAL_DIR: - tableURI = privObject.getD(); - break; - case DUMMYPARTITION: - case PARTITION: - // not currently handled - continue; - default: - throw new AssertionError("Unexpected object type"); - } - HivePrivObjectActionType actionType = AuthorizationUtils.getActionType(privObject); - HivePrivilegeObject hPrivObject = new HivePrivilegeObject(privObjType, dbname, tableURI, - actionType); - hivePrivobjs.add(hPrivObject); - } - return hivePrivobjs; - } - - private static HiveOperationType getHiveOperationType(HiveOperation op) { - return HiveOperationType.valueOf(op.name()); - } - - /** - * @return The current query plan associated with this Driver, if any. - */ - public QueryPlan getPlan() { - return plan; - } - - /** - * @param d - * The database to be locked - * @param t - * The table to be locked - * @param p - * The partition to be locked - * @param mode - * The mode of the lock (SHARED/EXCLUSIVE) Get the list of objects to be locked. If a - * partition needs to be locked (in any mode), all its parents should also be locked in - * SHARED mode. 
- */ - private List getLockObjects(Database d, Table t, Partition p, HiveLockMode mode) - throws SemanticException { - List locks = new LinkedList(); - - HiveLockObjectData lockData = - new HiveLockObjectData(plan.getQueryId(), - String.valueOf(System.currentTimeMillis()), - "IMPLICIT", - plan.getQueryStr()); - if (d != null) { - locks.add(new HiveLockObj(new HiveLockObject(d.getName(), lockData), mode)); - return locks; - } - - if (t != null) { - locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode)); - locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode)); - mode = HiveLockMode.SHARED; - locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode)); - return locks; - } - - if (p != null) { - locks.add(new HiveLockObj(new HiveLockObject(p.getTable().getDbName(), lockData), mode)); - if (!(p instanceof DummyPartition)) { - locks.add(new HiveLockObj(new HiveLockObject(p, lockData), mode)); - } - - // All the parents are locked in shared mode - mode = HiveLockMode.SHARED; - - // For dummy partitions, only partition name is needed - String name = p.getName(); - - if (p instanceof DummyPartition) { - name = p.getName().split("@")[2]; - } - - String partialName = ""; - String[] partns = name.split("/"); - int len = p instanceof DummyPartition ? partns.length : partns.length - 1; - Map partialSpec = new LinkedHashMap(); - for (int idx = 0; idx < len; idx++) { - String partn = partns[idx]; - partialName += partn; - String[] nameValue = partn.split("="); - assert(nameValue.length == 2); - partialSpec.put(nameValue[0], nameValue[1]); - try { - locks.add(new HiveLockObj( - new HiveLockObject(new DummyPartition(p.getTable(), p.getTable().getDbName() - + "/" + p.getTable().getTableName() - + "/" + partialName, - partialSpec), lockData), mode)); - partialName += "/"; - } catch (HiveException e) { - throw new SemanticException(e.getMessage()); - } - } - - locks.add(new HiveLockObj(new HiveLockObject(p.getTable(), lockData), mode)); - locks.add(new HiveLockObj(new HiveLockObject(p.getTable().getDbName(), lockData), mode)); - } - - return locks; - } - - // Write the current set of valid transactions into the conf file so that it can be read by - // the input format. - private int recordValidTxns() { - try { - ValidTxnList txns = txnMgr.getValidTxns(); - conf.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); - return 0; - } catch (LockException e) { - errorMessage = "FAILED: Error in determing valid transactions: " + e.getMessage(); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return 10; - } - } - - /** - * Acquire read and write locks needed by the statement. The list of objects to be locked are - * obtained from he inputs and outputs populated by the compiler. The lock acuisition scheme is - * pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making - * sure that the locks are lexicographically sorted. 
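
getLockObjects() above turns a single partition into a chain of lock objects: the database and the partition itself are locked in the requested mode, then every prefix of the partition path, the table, and the database are locked in SHARED mode. A simplified sketch of that expansion using plain strings; the names and the helper class are illustrative stand-ins, not Hive's HiveLockObj/HiveLockObject.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative sketch of the parent-lock expansion in getLockObjects():
    // the database and the partition are locked in the requested mode, then
    // every prefix of the partition path, the table and the database SHARED.
    public class LockExpansionSketch {
        static List<String> expand(String db, String table, String partName, String mode) {
            List<String> locks = new ArrayList<>();
            locks.add(db + " [" + mode + "]");
            locks.add(db + "/" + table + "/" + partName + " [" + mode + "]");

            // Parents of the partition (all but the last path component) are SHARED.
            String[] components = partName.split("/");
            StringBuilder partial = new StringBuilder();
            for (int i = 0; i < components.length - 1; i++) {
                if (i > 0) {
                    partial.append('/');
                }
                partial.append(components[i]); // each component is a "key=value" pair
                locks.add(db + "/" + table + "/" + partial + " [SHARED]");
            }
            locks.add(db + "/" + table + " [SHARED]");
            locks.add(db + " [SHARED]");
            return locks;
        }

        public static void main(String[] args) {
            for (String lock : expand("default", "clicks", "ds=2014-01-01/hr=12", "EXCLUSIVE")) {
                System.out.println(lock);
            }
        }
    }

For "ds=2014-01-01/hr=12" this prints the partition lock in EXCLUSIVE mode followed by SHARED locks on "ds=2014-01-01", the table, and the database, which mirrors the ordering built above.
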
- **/ - private int acquireReadWriteLocks() { - PerfLogger perfLogger = PerfLogger.getPerfLogger(); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); - - - try { - txnMgr.acquireLocks(plan, ctx, userName); - return 0; - } catch (LockException e) { - errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return 10; - } finally { - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); - } - } - - /** - * @param hiveLocks - * list of hive locks to be released Release all the locks specified. If some of the - * locks have already been released, ignore them - **/ - private void releaseLocks(List hiveLocks) throws LockException { - PerfLogger perfLogger = PerfLogger.getPerfLogger(); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS); - - if (hiveLocks != null) { - ctx.getHiveTxnManager().getLockManager().releaseLocks(hiveLocks); - } - ctx.setHiveLocks(null); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); - } - - @Override - public CommandProcessorResponse run(String command) - throws CommandNeedRetryException { - return run(command, false); - } - - public CommandProcessorResponse run() - throws CommandNeedRetryException { - return run(null, true); - } - - public CommandProcessorResponse run(String command, boolean alreadyCompiled) - throws CommandNeedRetryException { - CommandProcessorResponse cpr = runInternal(command, alreadyCompiled); - if(cpr.getResponseCode() == 0) { - return cpr; - } - SessionState ss = SessionState.get(); - if(ss == null) { - return cpr; - } - MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf()); - if(!(mdf instanceof JsonMetaDataFormatter)) { - return cpr; - } - /*Here we want to encode the error in machine readable way (e.g. JSON) - * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg. - * In practice that is rarely the case, so the messy logic below tries to tease - * out canonical error code if it can. Exclude stack trace from output when - * the error is a specific/expected one. - * It's written to stdout for backward compatibility (WebHCat consumes it).*/ - try { - if(downstreamError == null) { - mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState); - return cpr; - } - ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode()); - if(canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) { - /*Some HiveExceptions (e.g. SemanticException) don't set - canonical ErrorMsg explicitly, but there is logic - (e.g. #compile()) to find an appropriate canonical error and - return its code as error code. In this case we want to - preserve it for downstream code to interpret*/ - mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState, null); - return cpr; - } - if(downstreamError instanceof HiveException) { - HiveException rc = (HiveException) downstreamError; - mdf.error(ss.out, errorMessage, - rc.getCanonicalErrorMsg().getErrorCode(), SQLState, - rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR ? - org.apache.hadoop.util.StringUtils.stringifyException(rc) - : null); - } - else { - ErrorMsg canonicalMsg = - ErrorMsg.getErrorMsg(downstreamError.getMessage()); - mdf.error(ss.out, errorMessage, canonicalMsg.getErrorCode(), - SQLState, org.apache.hadoop.util.StringUtils. 
- stringifyException(downstreamError)); - } - } - catch(HiveException ex) { - console.printError("Unable to JSON-encode the error", - org.apache.hadoop.util.StringUtils.stringifyException(ex)); - } - return cpr; - } - - public CommandProcessorResponse compileAndRespond(String command) { - return createProcessorResponse(compileInternal(command)); - } - - private int compileInternal(String command) { - int ret; - synchronized (compileMonitor) { - ret = compile(command); - } - if (ret != 0) { - try { - releaseLocks(ctx.getHiveLocks()); - } catch (LockException e) { - LOG.warn("Exception in releasing locks. " - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - } - } - return ret; - } - - private CommandProcessorResponse runInternal(String command, boolean alreadyCompiled) - throws CommandNeedRetryException { - errorMessage = null; - SQLState = null; - downstreamError = null; - - if (!validateConfVariables()) { - return createProcessorResponse(12); - } - - HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, command); - // Get all the driver run hooks and pre-execute them. - List driverRunHooks; - try { - driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, - HiveDriverRunHook.class); - for (HiveDriverRunHook driverRunHook : driverRunHooks) { - driverRunHook.preDriverRun(hookContext); - } - } catch (Exception e) { - errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return createProcessorResponse(12); - } - - // Reset the perf logger - PerfLogger perfLogger = PerfLogger.getPerfLogger(true); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TIME_TO_SUBMIT); - - boolean requireLock = false; - boolean ckLock = false; - try { - ckLock = checkConcurrency(); - createTxnManager(); - } catch (SemanticException e) { - errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage(); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return createProcessorResponse(10); - } - int ret = recordValidTxns(); - if (ret != 0) { - return createProcessorResponse(ret); - } - - if (!alreadyCompiled) { - ret = compileInternal(command); - if (ret != 0) { - return createProcessorResponse(ret); - } - } - - // the reason that we set the txn manager for the cxt here is because each - // query has its own ctx object. The txn mgr is shared across the - // same instance of Driver, which can run multiple queries. - ctx.setHiveTxnManager(txnMgr); - - if (ckLock) { - boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY); - if(lockOnlyMapred) { - Queue> taskQueue = new LinkedList>(); - taskQueue.addAll(plan.getRootTasks()); - while (taskQueue.peek() != null) { - Task tsk = taskQueue.remove(); - requireLock = requireLock || tsk.requireLock(); - if(requireLock) { - break; - } - if (tsk instanceof ConditionalTask) { - taskQueue.addAll(((ConditionalTask)tsk).getListTasks()); - } - if(tsk.getChildTasks()!= null) { - taskQueue.addAll(tsk.getChildTasks()); - } - // does not add back up task here, because back up task should be the same - // type of the original task. 
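
When HIVE_LOCK_MAPRED_ONLY is enabled, runInternal() above walks the task graph breadth-first and stops at the first task that requires a lock. A self-contained sketch of that walk; SimpleTask is an illustrative stand-in for Hive's Task, and the real traversal also expands the list tasks of ConditionalTask nodes.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Queue;

    // Sketch of the breadth-first walk used to decide whether the plan needs
    // any locks when locking is restricted to map-reduce tasks.
    public class RequireLockSketch {
        static class SimpleTask {
            final boolean requiresLock;
            final List<SimpleTask> children = new ArrayList<SimpleTask>();
            SimpleTask(boolean requiresLock) { this.requiresLock = requiresLock; }
        }

        static boolean anyTaskRequiresLock(List<SimpleTask> rootTasks) {
            Queue<SimpleTask> taskQueue = new LinkedList<SimpleTask>(rootTasks);
            while (taskQueue.peek() != null) {
                SimpleTask task = taskQueue.remove();
                if (task.requiresLock) {
                    return true;                  // stop at the first task that needs a lock
                }
                taskQueue.addAll(task.children);  // keep walking the task graph
            }
            return false;
        }

        public static void main(String[] args) {
            SimpleTask mapRedStage = new SimpleTask(false);
            SimpleTask moveStage = new SimpleTask(true); // e.g. a stage that writes a table
            mapRedStage.children.add(moveStage);
            SimpleTask fetchStage = new SimpleTask(false);
            System.out.println(anyTaskRequiresLock(Arrays.asList(mapRedStage, fetchStage))); // true
        }
    }
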
- } - } else { - requireLock = true; - } - } - - if (requireLock) { - ret = acquireReadWriteLocks(); - if (ret != 0) { - try { - releaseLocks(ctx.getHiveLocks()); - } catch (LockException e) { - // Not much to do here - } - return createProcessorResponse(ret); - } - } - - ret = execute(); - if (ret != 0) { - //if needRequireLock is false, the release here will do nothing because there is no lock - try { - releaseLocks(ctx.getHiveLocks()); - } catch (LockException e) { - // Nothing to do here - } - return createProcessorResponse(ret); - } - - //if needRequireLock is false, the release here will do nothing because there is no lock - try { - releaseLocks(ctx.getHiveLocks()); - } catch (LockException e) { - errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return createProcessorResponse(12); - } - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN); - perfLogger.close(LOG, plan); - - // Take all the driver run hooks and post-execute them. - try { - for (HiveDriverRunHook driverRunHook : driverRunHooks) { - driverRunHook.postDriverRun(hookContext); - } - } catch (Exception e) { - errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return createProcessorResponse(12); - } - - return createProcessorResponse(ret); - } - - private CommandProcessorResponse createProcessorResponse(int ret) { - return new CommandProcessorResponse(ret, errorMessage, SQLState, downstreamError); - } - - /** - * Validate configuration variables. - * - * @return - */ - private boolean validateConfVariables() { - boolean valid = true; - if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES)) - && ((conf.getBoolVar(HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE)) || (conf - .getBoolVar(HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) || ((conf - .getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE))))) { - errorMessage = "FAILED: Hive Internal Error: " - + ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING.getMsg(); - SQLState = ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING.getSQLState(); - console.printError(errorMessage + "\n"); - valid = false; - } - return valid; - } - - /** - * Returns a set of hooks specified in a configuration variable. - * See getHooks(HiveConf.ConfVars hookConfVar, Class clazz) - */ - private List getHooks(HiveConf.ConfVars hookConfVar) throws Exception { - return getHooks(hookConfVar, Hook.class); - } - - /** - * Returns the hooks specified in a configuration variable. - * - * @param hookConfVar The configuration variable specifying a comma separated list of the hook - * class names. - * @param clazz The super type of the hooks. 
- * @return A list of the hooks cast as the type specified in clazz, in the order - * they are listed in the value of hookConfVar - * @throws Exception - */ - private List getHooks(ConfVars hookConfVar, - Class clazz) throws Exception { - try { - return HookUtils.getHooks(conf, hookConfVar, clazz); - } catch (ClassNotFoundException e) { - console.printError(hookConfVar.varname + " Class not found:" + e.getMessage()); - throw e; - } - } - - public int execute() throws CommandNeedRetryException { - PerfLogger perfLogger = PerfLogger.getPerfLogger(); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE); - - boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME)); - int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); - - String queryId = plan.getQueryId(); - String queryStr = plan.getQueryStr(); - - maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER); - - try { - LOG.info("Starting command: " + queryStr); - - plan.setStarted(); - - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().startQuery(queryStr, - conf.getVar(HiveConf.ConfVars.HIVEQUERYID)); - SessionState.get().getHiveHistory().logPlanProgress(plan); - } - resStream = null; - - HookContext hookContext = new HookContext(plan, conf, ctx.getPathToCS()); - hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK); - - for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) { - if (peh instanceof ExecuteWithHookContext) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName()); - - ((ExecuteWithHookContext) peh).run(hookContext); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName()); - } else if (peh instanceof PreExecute) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName()); - - ((PreExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(), - ShimLoader.getHadoopShims().getUGIForConf(conf)); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName()); - } - } - - int jobs = Utilities.getMRTasks(plan.getRootTasks()).size() - + Utilities.getTezTasks(plan.getRootTasks()).size(); - if (jobs > 0) { - console.printInfo("Query ID = " + plan.getQueryId()); - console.printInfo("Total jobs = " + jobs); - } - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, - String.valueOf(jobs)); - SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap()); - } - String jobname = Utilities.abbreviate(queryStr, maxlen - 6); - - // A runtime that launches runnable tasks as separate Threads through - // TaskRunners - // As soon as a task isRunnable, it is put in a queue - // At any time, at most maxthreads tasks can be running - // The main thread polls the TaskRunners to check if they have finished. - - DriverContext driverCxt = new DriverContext(ctx); - driverCxt.prepare(plan); - - ctx.setHDFSCleanup(true); - - this.driverCxt = driverCxt; // for canceling the query (should be bound to session?) - - SessionState.get().setMapRedStats(new LinkedHashMap()); - SessionState.get().setStackTraces(new HashMap>>()); - SessionState.get().setLocalMapRedErrors(new HashMap>()); - - // Add root Tasks to runnable - for (Task tsk : plan.getRootTasks()) { - // This should never happen, if it does, it's a bug with the potential to produce - // incorrect results. 
- assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty(); - driverCxt.addToRunnable(tsk); - } - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TIME_TO_SUBMIT); - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS); - // Loop while you either have tasks running, or tasks queued up - while (!destroyed && driverCxt.isRunning()) { - - // Launch upto maxthreads tasks - Task task; - while ((task = driverCxt.getRunnable(maxthreads)) != null) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TASK + task.getName() + "." + task.getId()); - TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt); - if (!runner.isRunning()) { - break; - } - } - - // poll the Tasks to see which one completed - TaskRunner tskRun = driverCxt.pollFinished(); - if (tskRun == null) { - continue; - } - hookContext.addCompleteTask(tskRun); - - Task tsk = tskRun.getTask(); - TaskResult result = tskRun.getTaskResult(); - - int exitVal = result.getExitVal(); - if (exitVal != 0) { - if (tsk.ifRetryCmdWhenFail()) { - driverCxt.shutdown(); - // in case we decided to run everything in local mode, restore the - // the jobtracker setting to its initial value - ctx.restoreOriginalTracker(); - throw new CommandNeedRetryException(); - } - Task backupTask = tsk.getAndInitBackupTask(); - if (backupTask != null) { - setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk); - console.printError(errorMessage); - errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName(); - console.printError(errorMessage); - - // add backup task to runnable - if (DriverContext.isLaunchable(backupTask)) { - driverCxt.addToRunnable(backupTask); - } - continue; - - } else { - hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK); - // Get all the failure execution hooks and execute them. - for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName()); - - ((ExecuteWithHookContext) ofh).run(hookContext); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName()); - } - setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk); - SQLState = "08S01"; - console.printError(errorMessage); - driverCxt.shutdown(); - // in case we decided to run everything in local mode, restore the - // the jobtracker setting to its initial value - ctx.restoreOriginalTracker(); - return exitVal; - } - } - - driverCxt.finished(tskRun); - - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), - Keys.TASK_RET_CODE, String.valueOf(exitVal)); - SessionState.get().getHiveHistory().endTask(queryId, tsk); - } - - if (tsk.getChildTasks() != null) { - for (Task child : tsk.getChildTasks()) { - if (DriverContext.isLaunchable(child)) { - driverCxt.addToRunnable(child); - } - } - } - } - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS); - - // in case we decided to run everything in local mode, restore the - // the jobtracker setting to its initial value - ctx.restoreOriginalTracker(); - - if (driverCxt.isShutdown()) { - SQLState = "HY008"; - errorMessage = "FAILED: Operation cancelled"; - console.printError(errorMessage); - return 1000; - } - - // remove incomplete outputs. - // Some incomplete outputs may be added at the beginning, for eg: for dynamic partitions. 
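
A much-simplified, single-threaded sketch of the scheduling loop above: runnable tasks are launched up to the thread cap, finished tasks are polled, a failing task is replaced by its backup task if one exists, and the children of a successful task become runnable. Node and the synchronous "run" are illustrative stand-ins for Task/TaskRunner; the real loop runs map-reduce tasks on separate threads and also handles retries, failure hooks, and shutdown.

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Deque;
    import java.util.List;

    // Simplified sketch of the execute() scheduling loop.
    public class ExecuteLoopSketch {
        static class Node {
            final String name;
            final int exitCode;
            Node backup;
            final List<Node> children = new ArrayList<Node>();
            Node(String name, int exitCode) { this.name = name; this.exitCode = exitCode; }
        }

        static int runPlan(List<Node> rootTasks) {
            Deque<Node> runnable = new ArrayDeque<Node>(rootTasks);
            while (!runnable.isEmpty()) {
                Node task = runnable.poll();       // "launch" and immediately "poll finished"
                System.out.println("running " + task.name);
                if (task.exitCode != 0) {
                    if (task.backup != null) {
                        System.out.println("retrying with backup " + task.backup.name);
                        runnable.add(task.backup); // backup task becomes runnable
                        continue;
                    }
                    return task.exitCode;          // no backup: abort the whole plan
                }
                runnable.addAll(task.children);    // a successful task releases its children
            }
            return 0;
        }

        public static void main(String[] args) {
            Node stage1 = new Node("Stage-1", 2);
            stage1.backup = new Node("Stage-1 (backup)", 0);
            Node stage2 = new Node("Stage-2", 0);
            stage1.backup.children.add(stage2);
            System.out.println("exit = " + runPlan(Arrays.asList(stage1)));
        }
    }
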
- // remove them - HashSet remOutputs = new HashSet(); - for (WriteEntity output : plan.getOutputs()) { - if (!output.isComplete()) { - remOutputs.add(output); - } - } - - for (WriteEntity output : remOutputs) { - plan.getOutputs().remove(output); - } - - hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK); - // Get all the post execution hooks and execute them. - for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) { - if (peh instanceof ExecuteWithHookContext) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName()); - - ((ExecuteWithHookContext) peh).run(hookContext); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName()); - } else if (peh instanceof PostExecute) { - perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName()); - - ((PostExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(), - (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo() - : null), ShimLoader.getHadoopShims().getUGIForConf(conf)); - - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName()); - } - } - - - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, - String.valueOf(0)); - SessionState.get().getHiveHistory().printRowCount(queryId); - } - } catch (CommandNeedRetryException e) { - throw e; - } catch (Exception e) { - ctx.restoreOriginalTracker(); - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, - String.valueOf(12)); - } - // TODO: do better with handling types of Exception here - errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); - SQLState = "08S01"; - downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return (12); - } finally { - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().endQuery(queryId); - } - if (noName) { - conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, ""); - } - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE); - - Map stats = SessionState.get().getMapRedStats(); - if (stats != null && !stats.isEmpty()) { - long totalCpu = 0; - console.printInfo("MapReduce Jobs Launched: "); - for (Map.Entry entry : stats.entrySet()) { - console.printInfo("Stage-" + entry.getKey() + ": " + entry.getValue()); - totalCpu += entry.getValue().getCpuMSec(); - } - console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu)); - } - } - plan.setDone(); - - if (SessionState.get() != null) { - try { - SessionState.get().getLineageState().clear(); - SessionState.get().getHiveHistory().logPlanProgress(plan); - } catch (Exception e) { - // ignore - } - } - console.printInfo("OK"); - - return (0); - } - - private void setErrorMsgAndDetail(int exitVal, Throwable downstreamError, Task tsk) { - this.downstreamError = downstreamError; - errorMessage = "FAILED: Execution Error, return code " + exitVal + " from " + tsk.getClass().getName(); - if(downstreamError != null) { - //here we assume that upstream code may have parametrized the msg from ErrorMsg - //so we want to keep it - errorMessage += ". " + downstreamError.getMessage(); - } - else { - ErrorMsg em = ErrorMsg.getErrorMsg(exitVal); - if (em != null) { - errorMessage += ". 
" + em.getMsg(); - } - } - } - /** - * Launches a new task - * - * @param tsk - * task being launched - * @param queryId - * Id of the query containing the task - * @param noName - * whether the task has a name set - * @param jobname - * name of the task, if it is a map-reduce job - * @param jobs - * number of map-reduce jobs - * @param cxt - * the driver context - */ - private TaskRunner launchTask(Task tsk, String queryId, boolean noName, - String jobname, int jobs, DriverContext cxt) throws HiveException { - if (SessionState.get() != null) { - SessionState.get().getHiveHistory().startTask(queryId, tsk, tsk.getClass().getName()); - } - if (tsk.isMapRedTask() && !(tsk instanceof ConditionalTask)) { - if (noName) { - conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname + "(" + tsk.getId() + ")"); - } - conf.set("mapreduce.workflow.node.name", tsk.getId()); - Utilities.setWorkflowAdjacencies(conf, plan); - cxt.incCurJobNo(1); - console.printInfo("Launching Job " + cxt.getCurJobNo() + " out of " + jobs); - } - tsk.initialize(conf, plan, cxt); - TaskResult tskRes = new TaskResult(); - TaskRunner tskRun = new TaskRunner(tsk, tskRes); - - cxt.launching(tskRun); - // Launch Task - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.EXECPARALLEL) && tsk.isMapRedTask()) { - // Launch it in the parallel mode, as a separate thread only for MR tasks - tskRun.start(); - } else { - tskRun.runSequential(); - } - return tskRun; - } - - public boolean isFetchingTable() { - return plan != null && plan.getFetchTask() != null; - } - - @SuppressWarnings("unchecked") - public boolean getResults(List res) throws IOException, CommandNeedRetryException { - if (destroyed) { - throw new IOException("FAILED: Operation cancelled"); - } - if (isFetchingTable()) { - FetchTask ft = plan.getFetchTask(); - ft.setMaxRows(maxRows); - return ft.fetch(res); - } - - if (resStream == null) { - resStream = ctx.getStream(); - } - if (resStream == null) { - return false; - } - - int numRows = 0; - String row = null; - - while (numRows < maxRows) { - if (resStream == null) { - if (numRows > 0) { - return true; - } else { - return false; - } - } - - bos.reset(); - Utilities.StreamStatus ss; - try { - ss = Utilities.readColumn(resStream, bos); - if (bos.getLength() > 0) { - row = new String(bos.getData(), 0, bos.getLength(), "UTF-8"); - } else if (ss == Utilities.StreamStatus.TERMINATED) { - row = new String(); - } - - if (row != null) { - numRows++; - res.add(row); - } - row = null; - } catch (IOException e) { - console.printError("FAILED: Unexpected IO exception : " + e.getMessage()); - return false; - } - - if (ss == Utilities.StreamStatus.EOF) { - resStream = ctx.getStream(); - } - } - return true; - } - - public void resetFetch() throws IOException { - if (plan != null && plan.getFetchTask() != null) { - try { - plan.getFetchTask().clearFetch(); - } catch (Exception e) { - throw new IOException("Error closing the current fetch task", e); - } - plan.getFetchTask().initialize(conf, plan, null); - } else { - ctx.resetStream(); - resStream = null; - } - } - - public int getTryCount() { - return tryCount; - } - - public void setTryCount(int tryCount) { - this.tryCount = tryCount; - } - - - public int close() { - try { - if (plan != null) { - FetchTask fetchTask = plan.getFetchTask(); - if (null != fetchTask) { - try { - fetchTask.clearFetch(); - } catch (Exception e) { - LOG.debug(" Exception while clearing the Fetch task ", e); - } - } - } - if (driverCxt != null) { - driverCxt.shutdown(); - driverCxt = null; - } - if (ctx != null) { - 
ctx.clear(); - } - if (null != resStream) { - try { - ((FSDataInputStream) resStream).close(); - } catch (Exception e) { - LOG.debug(" Exception while closing the resStream ", e); - } - } - } catch (Exception e) { - console.printError("FAILED: Hive Internal Error: " + Utilities.getNameMessage(e) + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return 13; - } - - return 0; - } - - public void destroy() { - if (destroyed) { - return; - } - destroyed = true; - if (ctx != null) { - try { - releaseLocks(ctx.getHiveLocks()); - } catch (LockException e) { - LOG.warn("Exception when releasing locking in destroy: " + - e.getMessage()); - } - } - if (txnMgr != null) { - txnMgr.closeTxnManager(); - } - } - - public org.apache.hadoop.hive.ql.plan.api.Query getQueryPlan() throws IOException { - return plan.getQueryPlan(); - } - - public String getErrorMsg() { - return errorMessage; - } + static final private String CLASS_NAME = Driver.class.getName(); + static final private Log LOG = LogFactory.getLog(CLASS_NAME); + static final private LogHelper console = new LogHelper(LOG); + + private static final Object compileMonitor = new Object(); + + private int maxRows = 100; + ByteStream.Output bos = new ByteStream.Output(); + + private HiveConf conf; + private DataInput resStream; + private Context ctx; + private DriverContext driverCxt; + private QueryPlan plan; + private Schema schema; + private String errorMessage; + private String SQLState; + private Throwable downstreamError; + private HiveTxnManager txnMgr; + + // A limit on the number of threads that can be launched + private int maxthreads; + private int tryCount = Integer.MAX_VALUE; + + private boolean destroyed; + + private String userName; + + private void createTxnManager() throws SemanticException { + if (txnMgr == null) { + try { + txnMgr = TxnManagerFactory.getTxnManagerFactory() + .getTxnManager(conf); + } catch (LockException e) { + throw new SemanticException(e.getMessage(), e); + } + } + } + + private boolean checkConcurrency() throws SemanticException { + boolean supportConcurrency = conf + .getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); + if (!supportConcurrency) { + LOG.info("Concurrency mode is disabled, not creating a lock manager"); + return false; + } + return true; + } + + @Override + public void init() { + Operator.resetId(); + } + + /** + * Return the status information about the Map-Reduce cluster + */ + public ClusterStatus getClusterStatus() throws Exception { + ClusterStatus cs; + try { + JobConf job = new JobConf(conf); + JobClient jc = new JobClient(job); + cs = jc.getClusterStatus(); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + LOG.info("Returning cluster status: " + cs.toString()); + return cs; + } + + public Schema getSchema() { + return schema; + } + + /** + * Get a Schema with fields represented with native Hive types + */ + public static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) { + Schema schema = null; + + // If we have a plan, prefer its logical result schema if it's + // available; otherwise, try digging out a fetch task; failing that, + // give up. + if (sem == null) { + // can't get any info without a plan + } else if (sem.getResultSchema() != null) { + List lst = sem.getResultSchema(); + schema = new Schema(lst, null); + } else if (sem.getFetchTask() != null) { + FetchTask ft = sem.getFetchTask(); + TableDesc td = ft.getTblDesc(); + // partitioned tables don't have tableDesc set on the FetchTask. 
+ // Instead + // they have a list of PartitionDesc objects, each with a table + // desc. + // Let's + // try to fetch the desc for the first partition and use it's + // deserializer. + if (td == null && ft.getWork() != null + && ft.getWork().getPartDesc() != null) { + if (ft.getWork().getPartDesc().size() > 0) { + td = ft.getWork().getPartDesc().get(0).getTableDesc(); + } + } + + if (td == null) { + LOG.info("No returning schema."); + } else { + String tableName = "result"; + List lst = null; + try { + lst = MetaStoreUtils.getFieldsFromDeserializer(tableName, + td.getDeserializer()); + } catch (Exception e) { + LOG.warn("Error getting schema: " + + org.apache.hadoop.util.StringUtils + .stringifyException(e)); + } + if (lst != null) { + schema = new Schema(lst, null); + } + } + } + if (schema == null) { + schema = new Schema(); + } + LOG.info("Returning Hive schema: " + schema); + return schema; + } + + /** + * Get a Schema with fields represented with Thrift DDL types + */ + public Schema getThriftSchema() throws Exception { + Schema schema; + try { + schema = getSchema(); + if (schema != null) { + List lst = schema.getFieldSchemas(); + // Go over the schema and convert type to thrift type + if (lst != null) { + for (FieldSchema f : lst) { + f.setType(MetaStoreUtils.typeToThriftType(f.getType())); + } + } + } + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + LOG.info("Returning Thrift schema: " + schema); + return schema; + } + + /** + * Return the maximum number of rows returned by getResults + */ + public int getMaxRows() { + return maxRows; + } + + /** + * Set the maximum number of rows returned by getResults + */ + public void setMaxRows(int maxRows) { + this.maxRows = maxRows; + } + + /** + * for backwards compatibility with current tests + */ + public Driver(HiveConf conf) { + this.conf = conf; + } + + public Driver(HiveConf conf, String userName) { + this(conf); + this.userName = userName; + } + + public Driver() { + if (SessionState.get() != null) { + conf = SessionState.get().getConf(); + } + } + + /** + * Compile a new query. Any currently-planned query associated with this + * Driver is discarded. Do not reset id for inner queries(index, etc). Task + * ids are used for task identity check. + * + * @param command + * The SQL query to compile. + */ + public int compile(String command) { + return compile(command, true); + } + + /** + * Hold state variables specific to each query being executed, that may not + * be consistent in the overall SessionState + */ + private static class QueryState { + private HiveOperation op; + private String cmd; + private boolean init = false; + + /** + * Initialize the queryState with the query state variables + */ + public void init(HiveOperation op, String cmd) { + this.op = op; + this.cmd = cmd; + this.init = true; + } + + public boolean isInitialized() { + return this.init; + } + + public HiveOperation getOp() { + return this.op; + } + + public String getCmd() { + return this.cmd; + } + } + + public void saveSession(QueryState qs) { + SessionState oldss = SessionState.get(); + if (oldss != null && oldss.getHiveOperation() != null) { + qs.init(oldss.getHiveOperation(), oldss.getCmd()); + } + } + + public void restoreSession(QueryState qs) { + SessionState ss = SessionState.get(); + if (ss != null && qs != null && qs.isInitialized()) { + ss.setCmd(qs.getCmd()); + ss.setCommandType(qs.getOp()); + } + } + + /** + * Compile a new query, but potentially reset taskID counter. 
Not resetting + * task counter is useful for generating re-entrant QL queries. + * + * @param command + * The HiveQL query to compile + * @param resetTaskIds + * Resets taskID counter if true. + * @return 0 for ok + */ + public int compile(String command, boolean resetTaskIds) { + + PerfLogger perfLogger = PerfLogger.getPerfLogger(); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE); + + // holder for parent command type/string when executing reentrant + // queries + QueryState queryState = new QueryState(); + + if (plan != null) { + close(); + plan = null; + } + + if (resetTaskIds) { + TaskFactory.resetId(); + } + saveSession(queryState); + + // generate new query id + String queryId = QueryPlan.makeQueryId(); + conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + + try { + command = new VariableSubstitution().substitute(conf, command); + ctx = new Context(conf); + ctx.setTryCount(getTryCount()); + ctx.setCmd(command); + ctx.setHDFSCleanup(true); + + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE); + ParseDriver pd = new ParseDriver(); + ASTNode tree = pd.parse(command, ctx); + tree = ParseUtils.findRootNonNullToken(tree); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE); + + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE); + BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree); + List saHooks = getHooks( + HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, + HiveSemanticAnalyzerHook.class); + + // Do semantic analysis and plan generation + if (saHooks != null) { + HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl(); + hookCtx.setConf(conf); + hookCtx.setUserName(userName); + for (HiveSemanticAnalyzerHook hook : saHooks) { + tree = hook.preAnalyze(hookCtx, tree); + } + sem.analyze(tree, ctx); + hookCtx.update(sem); + for (HiveSemanticAnalyzerHook hook : saHooks) { + hook.postAnalyze(hookCtx, sem.getRootTasks()); + } + } else { + sem.analyze(tree, ctx); + } + + LOG.info("Semantic Analysis Completed"); + + // validate the plan + sem.validate(); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE); + + plan = new QueryPlan(command, sem, + perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId); + + String queryStr = plan.getQueryStr(); + conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr); + + conf.set("mapreduce.workflow.id", "hive_" + queryId); + conf.set("mapreduce.workflow.name", queryStr); + + // initialize FetchTask right here + if (plan.getFetchTask() != null) { + plan.getFetchTask().initialize(conf, plan, null); + } + + // get the output schema + schema = getSchema(sem, conf); + + // do the authorization check + if (!sem.skipAuthorization() + && HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { + + try { + perfLogger.PerfLogBegin(CLASS_NAME, + PerfLogger.DO_AUTHORIZATION); + doAuthorization(sem); + } catch (AuthorizationException authExp) { + console.printError("Authorization failed:" + + authExp.getMessage() + + ". 
Use SHOW GRANT to get more details."); + errorMessage = authExp.getMessage(); + SQLState = "42000"; + return 403; + } finally { + perfLogger.PerfLogEnd(CLASS_NAME, + PerfLogger.DO_AUTHORIZATION); + } + } + + return 0; + } catch (Exception e) { + ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage()); + errorMessage = "FAILED: " + e.getClass().getSimpleName(); + if (error != ErrorMsg.GENERIC_ERROR) { + errorMessage += " [Error " + error.getErrorCode() + "]:"; + } + + // HIVE-4889 + if ((e instanceof IllegalArgumentException) + && e.getMessage() == null && e.getCause() != null) { + errorMessage += " " + e.getCause().getMessage(); + } else { + errorMessage += " " + e.getMessage(); + } + + SQLState = error.getSQLState(); + downstreamError = e; + console.printError(errorMessage, "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return error.getErrorCode(); + } finally { + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE); + restoreSession(queryState); + } + } + + public static void doAuthorization(BaseSemanticAnalyzer sem) + throws HiveException, AuthorizationException { + HashSet inputs = sem.getInputs(); + HashSet outputs = sem.getOutputs(); + SessionState ss = SessionState.get(); + HiveOperation op = ss.getHiveOperation(); + Hive db = sem.getDb(); + if (ss.isAuthorizationModeV2()) { + doAuthorizationV2(ss, op, inputs, outputs); + return; + } + if (op == null) { + throw new HiveException("Operation should not be null"); + } + HiveAuthorizationProvider authorizer = ss.getAuthorizer(); + if (op.equals(HiveOperation.CREATEDATABASE)) { + authorizer.authorize(op.getInputRequiredPrivileges(), + op.getOutputRequiredPrivileges()); + } else if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) + || op.equals(HiveOperation.CREATETABLE)) { + authorizer.authorize(db.getDatabase(SessionState.get() + .getCurrentDatabase()), null, + HiveOperation.CREATETABLE_AS_SELECT + .getOutputRequiredPrivileges()); + } else { + if (op.equals(HiveOperation.IMPORT)) { + ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem; + if (!isa.existsTable()) { + authorizer.authorize(db.getDatabase(SessionState.get() + .getCurrentDatabase()), null, + HiveOperation.CREATETABLE_AS_SELECT + .getOutputRequiredPrivileges()); + } + } + } + if (outputs != null && outputs.size() > 0) { + for (WriteEntity write : outputs) { + if (write.isDummy()) { + continue; + } + if (write.getType() == Entity.Type.DATABASE) { + authorizer.authorize(write.getDatabase(), null, + op.getOutputRequiredPrivileges()); + continue; + } + + if (write.getType() == WriteEntity.Type.PARTITION) { + Partition part = db.getPartition(write.getTable(), write + .getPartition().getSpec(), false); + if (part != null) { + authorizer.authorize(write.getPartition(), null, + op.getOutputRequiredPrivileges()); + continue; + } + } + + if (write.getTable() != null) { + authorizer.authorize(write.getTable(), null, + op.getOutputRequiredPrivileges()); + } + } + } + + if (inputs != null && inputs.size() > 0) { + Map> tab2Cols = new HashMap>(); + Map> part2Cols = new HashMap>(); + + // determine if partition level privileges should be checked for + // input tables + Map tableUsePartLevelAuth = new HashMap(); + for (ReadEntity read : inputs) { + if (read.isDummy() || read.getType() == Entity.Type.DATABASE) { + continue; + } + Table tbl = read.getTable(); + if ((read.getPartition() != null) + || (tbl != null && tbl.isPartitioned())) { + String tblName = tbl.getTableName(); + if (tableUsePartLevelAuth.get(tblName) == null) { + boolean usePartLevelPriv = 
(tbl.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE" + .equalsIgnoreCase(tbl.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE")))); + if (usePartLevelPriv) { + tableUsePartLevelAuth.put(tblName, Boolean.TRUE); + } else { + tableUsePartLevelAuth.put(tblName, Boolean.FALSE); + } + } + } + } + + // for a select or create-as-select query, populate the partition to + // column (par2Cols) or + // table to columns mapping (tab2Cols) + if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) + || op.equals(HiveOperation.QUERY)) { + SemanticAnalyzer querySem = (SemanticAnalyzer) sem; + ParseContext parseCtx = querySem.getParseContext(); + Map tsoTopMap = parseCtx + .getTopToTable(); + + for (Map.Entry> topOpMap : querySem + .getParseContext().getTopOps().entrySet()) { + Operator topOp = topOpMap + .getValue(); + if (topOp instanceof TableScanOperator + && tsoTopMap.containsKey(topOp)) { + TableScanOperator tableScanOp = (TableScanOperator) topOp; + Table tbl = tsoTopMap.get(tableScanOp); + List neededColumnIds = tableScanOp + .getNeededColumnIDs(); + List columns = tbl.getCols(); + List cols = new ArrayList(); + for (int i = 0; i < neededColumnIds.size(); i++) { + cols.add(columns.get(neededColumnIds.get(i)) + .getName()); + } + // map may not contain all sources, since input list may + // have been optimized out + // or non-existent tho such sources may still be + // referenced by the TableScanOperator + // if it's null then the partition probably doesn't + // exist so let's use table permission + if (tbl.isPartitioned() + && tableUsePartLevelAuth + .get(tbl.getTableName()) == Boolean.TRUE) { + String alias_id = topOpMap.getKey(); + + PrunedPartitionList partsList = PartitionPruner + .prune(tableScanOp, parseCtx, alias_id); + Set parts = partsList.getPartitions(); + for (Partition part : parts) { + List existingCols = part2Cols.get(part); + if (existingCols == null) { + existingCols = new ArrayList(); + } + existingCols.addAll(cols); + part2Cols.put(part, existingCols); + } + } else { + List existingCols = tab2Cols.get(tbl); + if (existingCols == null) { + existingCols = new ArrayList(); + } + existingCols.addAll(cols); + tab2Cols.put(tbl, existingCols); + } + } + } + } + + // cache the results for table authorization + Set tableAuthChecked = new HashSet(); + for (ReadEntity read : inputs) { + if (read.isDummy()) { + continue; + } + if (read.getType() == Entity.Type.DATABASE) { + authorizer.authorize(read.getDatabase(), + op.getInputRequiredPrivileges(), null); + continue; + } + Table tbl = read.getTable(); + if (read.getPartition() != null) { + Partition partition = read.getPartition(); + tbl = partition.getTable(); + // use partition level authorization + if (tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) { + List cols = part2Cols.get(partition); + if (cols != null && cols.size() > 0) { + authorizer.authorize(partition.getTable(), + partition, cols, + op.getInputRequiredPrivileges(), null); + } else { + authorizer.authorize(partition, + op.getInputRequiredPrivileges(), null); + } + continue; + } + } + + // if we reach here, it means it needs to do a table + // authorization + // check, and the table authorization may already happened + // because of other + // partitions + if (tbl != null + && !tableAuthChecked.contains(tbl.getTableName()) + && !(tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE)) { + List cols = tab2Cols.get(tbl); + if (cols != null && cols.size() > 0) { + authorizer.authorize(tbl, null, cols, + op.getInputRequiredPrivileges(), 
null); + } else { + authorizer.authorize(tbl, + op.getInputRequiredPrivileges(), null); + } + tableAuthChecked.add(tbl.getTableName()); + } + } + + } + } + + private static void doAuthorizationV2(SessionState ss, HiveOperation op, + HashSet inputs, HashSet outputs) + throws HiveException { + HiveOperationType hiveOpType = getHiveOperationType(op); + List inputsHObjs = getHivePrivObjects(inputs); + List outputHObjs = getHivePrivObjects(outputs); + ss.getAuthorizerV2().checkPrivileges(hiveOpType, inputsHObjs, + outputHObjs); + return; + } + + private static List getHivePrivObjects( + HashSet privObjects) { + List hivePrivobjs = new ArrayList(); + if (privObjects == null) { + return hivePrivobjs; + } + for (Entity privObject : privObjects) { + HivePrivilegeObjectType privObjType = AuthorizationUtils + .getHivePrivilegeObjectType(privObject.getType()); + + if (privObject instanceof ReadEntity + && !((ReadEntity) privObject).isDirect()) { + // In case of views, the underlying views or tables are not + // direct dependencies + // and are not used for authorization checks. + // This ReadEntity represents one of the underlying + // tables/views, so skip it. + // See description of the isDirect in ReadEntity + continue; + } + if (privObject instanceof WriteEntity + && ((WriteEntity) privObject).isTempURI()) { + // do not authorize temporary uris + continue; + } + + // support for authorization on partitions needs to be added + String dbname = null; + String tableURI = null; + switch (privObject.getType()) { + case DATABASE: + dbname = privObject.getDatabase() == null ? null : privObject + .getDatabase().getName(); + break; + case TABLE: + dbname = privObject.getTable() == null ? null : privObject + .getTable().getDbName(); + tableURI = privObject.getTable() == null ? null : privObject + .getTable().getTableName(); + break; + case DFS_DIR: + case LOCAL_DIR: + tableURI = privObject.getD(); + break; + case DUMMYPARTITION: + case PARTITION: + // not currently handled + continue; + default: + throw new AssertionError("Unexpected object type"); + } + HivePrivObjectActionType actionType = AuthorizationUtils + .getActionType(privObject); + HivePrivilegeObject hPrivObject = new HivePrivilegeObject( + privObjType, dbname, tableURI, actionType); + hivePrivobjs.add(hPrivObject); + } + return hivePrivobjs; + } + + private static HiveOperationType getHiveOperationType(HiveOperation op) { + return HiveOperationType.valueOf(op.name()); + } + + /** + * @return The current query plan associated with this Driver, if any. + */ + public QueryPlan getPlan() { + return plan; + } + + /** + * @param d + * The database to be locked + * @param t + * The table to be locked + * @param p + * The partition to be locked + * @param mode + * The mode of the lock (SHARED/EXCLUSIVE) Get the list of + * objects to be locked. If a partition needs to be locked (in + * any mode), all its parents should also be locked in SHARED + * mode. 
+ */ + private List getLockObjects(Database d, Table t, Partition p, + HiveLockMode mode) throws SemanticException { + List locks = new LinkedList(); + + HiveLockObjectData lockData = new HiveLockObjectData(plan.getQueryId(), + String.valueOf(System.currentTimeMillis()), "IMPLICIT", + plan.getQueryStr()); + if (d != null) { + locks.add(new HiveLockObj( + new HiveLockObject(d.getName(), lockData), mode)); + return locks; + } + + if (t != null) { + locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), + lockData), mode)); + locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode)); + mode = HiveLockMode.SHARED; + locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), + lockData), mode)); + return locks; + } + + if (p != null) { + locks.add(new HiveLockObj(new HiveLockObject(p.getTable() + .getDbName(), lockData), mode)); + if (!(p instanceof DummyPartition)) { + locks.add(new HiveLockObj(new HiveLockObject(p, lockData), mode)); + } + + // All the parents are locked in shared mode + mode = HiveLockMode.SHARED; + + // For dummy partitions, only partition name is needed + String name = p.getName(); + + if (p instanceof DummyPartition) { + name = p.getName().split("@")[2]; + } + + String partialName = ""; + String[] partns = name.split("/"); + int len = p instanceof DummyPartition ? partns.length + : partns.length - 1; + Map partialSpec = new LinkedHashMap(); + for (int idx = 0; idx < len; idx++) { + String partn = partns[idx]; + partialName += partn; + String[] nameValue = partn.split("="); + assert (nameValue.length == 2); + partialSpec.put(nameValue[0], nameValue[1]); + try { + locks.add(new HiveLockObj(new HiveLockObject( + new DummyPartition(p.getTable(), p.getTable() + .getDbName() + + "/" + + p.getTable().getTableName() + + "/" + + partialName, partialSpec), lockData), + mode)); + partialName += "/"; + } catch (HiveException e) { + throw new SemanticException(e.getMessage()); + } + } + + locks.add(new HiveLockObj( + new HiveLockObject(p.getTable(), lockData), mode)); + locks.add(new HiveLockObj(new HiveLockObject(p.getTable() + .getDbName(), lockData), mode)); + } + + return locks; + } + + // Write the current set of valid transactions into the conf file so that it + // can be read by + // the input format. + private int recordValidTxns() { + try { + ValidTxnList txns = txnMgr.getValidTxns(); + conf.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + return 0; + } catch (LockException e) { + errorMessage = "FAILED: Error in determing valid transactions: " + + e.getMessage(); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage, "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return 10; + } + } + + /** + * Acquire read and write locks needed by the statement. The list of objects + * to be locked are obtained from he inputs and outputs populated by the + * compiler. The lock acuisition scheme is pretty simple. If all the locks + * cannot be obtained, error out. Deadlock is avoided by making sure that + * the locks are lexicographically sorted. 
+ **/ + private int acquireReadWriteLocks() { + PerfLogger perfLogger = PerfLogger.getPerfLogger(); + perfLogger + .PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); + + try { + txnMgr.acquireLocks(plan, ctx, userName); + return 0; + } catch (LockException e) { + errorMessage = "FAILED: Error in acquiring locks: " + + e.getMessage(); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage, "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return 10; + } finally { + perfLogger.PerfLogEnd(CLASS_NAME, + PerfLogger.ACQUIRE_READ_WRITE_LOCKS); + } + } + + /** + * @param hiveLocks + * list of hive locks to be released Release all the locks + * specified. If some of the locks have already been released, + * ignore them + **/ + private void releaseLocks(List hiveLocks) throws LockException { + PerfLogger perfLogger = PerfLogger.getPerfLogger(); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + + if (hiveLocks != null) { + ctx.getHiveTxnManager().getLockManager().releaseLocks(hiveLocks); + } + ctx.setHiveLocks(null); + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + } + + @Override + public CommandProcessorResponse run(String command) + throws CommandNeedRetryException { + return run(command, false); + } + + public CommandProcessorResponse run() throws CommandNeedRetryException { + return run(null, true); + } + + public CommandProcessorResponse run(String command, boolean alreadyCompiled) + throws CommandNeedRetryException { + CommandProcessorResponse cpr = runInternal(command, alreadyCompiled); + if (cpr.getResponseCode() == 0) { + return cpr; + } + SessionState ss = SessionState.get(); + if (ss == null) { + return cpr; + } + MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf()); + if (!(mdf instanceof JsonMetaDataFormatter)) { + return cpr; + } + /* + * Here we want to encode the error in machine readable way (e.g. JSON) + * Ideally, errorCode would always be set to a canonical error defined + * in ErrorMsg. In practice that is rarely the case, so the messy logic + * below tries to tease out canonical error code if it can. Exclude + * stack trace from output when the error is a specific/expected one. + * It's written to stdout for backward compatibility (WebHCat consumes + * it). + */ + try { + if (downstreamError == null) { + mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState); + return cpr; + } + ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode()); + if (canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) { + /* + * Some HiveExceptions (e.g. SemanticException) don't set + * canonical ErrorMsg explicitly, but there is logic (e.g. + * #compile()) to find an appropriate canonical error and return + * its code as error code. In this case we want to preserve it + * for downstream code to interpret + */ + mdf.error(ss.out, errorMessage, cpr.getResponseCode(), + SQLState, null); + return cpr; + } + if (downstreamError instanceof HiveException) { + HiveException rc = (HiveException) downstreamError; + mdf.error( + ss.out, + errorMessage, + rc.getCanonicalErrorMsg().getErrorCode(), + SQLState, + rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR ? 
org.apache.hadoop.util.StringUtils + .stringifyException(rc) : null); + } else { + ErrorMsg canonicalMsg = ErrorMsg.getErrorMsg(downstreamError + .getMessage()); + mdf.error(ss.out, errorMessage, canonicalMsg.getErrorCode(), + SQLState, org.apache.hadoop.util.StringUtils + .stringifyException(downstreamError)); + } + } catch (HiveException ex) { + console.printError("Unable to JSON-encode the error", + org.apache.hadoop.util.StringUtils.stringifyException(ex)); + } + return cpr; + } + + public CommandProcessorResponse compileAndRespond(String command) { + return createProcessorResponse(compileInternal(command)); + } + + private int compileInternal(String command) { + int ret; + synchronized (compileMonitor) { + ret = compile(command); + } + if (ret != 0) { + try { + releaseLocks(ctx.getHiveLocks()); + } catch (LockException e) { + LOG.warn("Exception in releasing locks. " + + org.apache.hadoop.util.StringUtils + .stringifyException(e)); + } + } + return ret; + } + + private CommandProcessorResponse runInternal(String command, + boolean alreadyCompiled) throws CommandNeedRetryException { + errorMessage = null; + SQLState = null; + downstreamError = null; + + if (!validateConfVariables()) { + return createProcessorResponse(12); + } + + HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl( + conf, command); + // Get all the driver run hooks and pre-execute them. + List driverRunHooks; + try { + driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, + HiveDriverRunHook.class); + for (HiveDriverRunHook driverRunHook : driverRunHooks) { + driverRunHook.preDriverRun(hookContext); + } + } catch (Exception e) { + errorMessage = "FAILED: Hive Internal Error: " + + Utilities.getNameMessage(e); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage + "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return createProcessorResponse(12); + } + + // Reset the perf logger + PerfLogger perfLogger = PerfLogger.getPerfLogger(true); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TIME_TO_SUBMIT); + + boolean requireLock = false; + boolean ckLock = false; + try { + ckLock = checkConcurrency(); + createTxnManager(); + } catch (SemanticException e) { + errorMessage = "FAILED: Error in semantic analysis: " + + e.getMessage(); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage, "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return createProcessorResponse(10); + } + int ret = recordValidTxns(); + if (ret != 0) { + return createProcessorResponse(ret); + } + + if (!alreadyCompiled) { + ret = compileInternal(command); + if (ret != 0) { + return createProcessorResponse(ret); + } + } + + // the reason that we set the txn manager for the cxt here is because + // each + // query has its own ctx object. The txn mgr is shared across the + // same instance of Driver, which can run multiple queries. 
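      // Editorial sketch (not part of this patch): the sharing described above
      // means a caller that reuses one Driver instance for several statements
      // keeps a single transaction manager across all of them, while each call
      // still gets its own Context, e.g.
      //
      //   Driver driver = new Driver(conf);          // one Driver, one txnMgr
      //   driver.run("INSERT INTO t VALUES (1)");    // Context #1
      //   driver.run("SELECT COUNT(*) FROM t");      // Context #2, same txnMgr
      //
      // The statement below re-attaches that shared transaction manager to the
      // per-query Context built for the current command ("t" is a hypothetical
      // table used only for illustration).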
+ ctx.setHiveTxnManager(txnMgr); + + if (ckLock) { + boolean lockOnlyMapred = HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY); + if (lockOnlyMapred) { + Queue> taskQueue = new LinkedList>(); + taskQueue.addAll(plan.getRootTasks()); + while (taskQueue.peek() != null) { + Task tsk = taskQueue.remove(); + requireLock = requireLock || tsk.requireLock(); + if (requireLock) { + break; + } + if (tsk instanceof ConditionalTask) { + taskQueue + .addAll(((ConditionalTask) tsk).getListTasks()); + } + if (tsk.getChildTasks() != null) { + taskQueue.addAll(tsk.getChildTasks()); + } + // does not add back up task here, because back up task + // should be the same + // type of the original task. + } + } else { + requireLock = true; + } + } + + if (requireLock) { + ret = acquireReadWriteLocks(); + if (ret != 0) { + try { + releaseLocks(ctx.getHiveLocks()); + } catch (LockException e) { + // Not much to do here + } + return createProcessorResponse(ret); + } + } + + ret = execute(); + if (ret != 0) { + // if needRequireLock is false, the release here will do nothing + // because there is no lock + try { + releaseLocks(ctx.getHiveLocks()); + } catch (LockException e) { + // Nothing to do here + } + return createProcessorResponse(ret); + } + + // if needRequireLock is false, the release here will do nothing because + // there is no lock + try { + releaseLocks(ctx.getHiveLocks()); + } catch (LockException e) { + errorMessage = "FAILED: Hive Internal Error: " + + Utilities.getNameMessage(e); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage + "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return createProcessorResponse(12); + } + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN); + perfLogger.close(LOG, plan); + + // Take all the driver run hooks and post-execute them. + try { + for (HiveDriverRunHook driverRunHook : driverRunHooks) { + driverRunHook.postDriverRun(hookContext); + } + } catch (Exception e) { + errorMessage = "FAILED: Hive Internal Error: " + + Utilities.getNameMessage(e); + SQLState = ErrorMsg.findSQLState(e.getMessage()); + downstreamError = e; + console.printError(errorMessage + "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return createProcessorResponse(12); + } + + return createProcessorResponse(ret); + } + + private CommandProcessorResponse createProcessorResponse(int ret) { + return new CommandProcessorResponse(ret, errorMessage, SQLState, + downstreamError); + } + + /** + * Validate configuration variables. + * + * @return + */ + private boolean validateConfVariables() { + boolean valid = true; + if ((!conf + .getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES)) + && ((conf + .getBoolVar(HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE)) + || (conf.getBoolVar(HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) || ((conf + .getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE))))) { + errorMessage = "FAILED: Hive Internal Error: " + + ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING + .getMsg(); + SQLState = ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING + .getSQLState(); + console.printError(errorMessage + "\n"); + valid = false; + } + return valid; + } + + /** + * Returns a set of hooks specified in a configuration variable. 
See + * getHooks(HiveConf.ConfVars hookConfVar, Class clazz) + */ + private List getHooks(HiveConf.ConfVars hookConfVar) throws Exception { + return getHooks(hookConfVar, Hook.class); + } + + /** + * Returns the hooks specified in a configuration variable. + * + * @param hookConfVar + * The configuration variable specifying a comma separated list + * of the hook class names. + * @param clazz + * The super type of the hooks. + * @return A list of the hooks cast as the type specified in clazz, in the + * order they are listed in the value of hookConfVar + * @throws Exception + */ + private List getHooks(ConfVars hookConfVar, + Class clazz) throws Exception { + try { + return HookUtils.getHooks(conf, hookConfVar, clazz); + } catch (ClassNotFoundException e) { + console.printError(hookConfVar.varname + " Class not found:" + + e.getMessage()); + throw e; + } + } + + public int execute() throws CommandNeedRetryException { + PerfLogger perfLogger = PerfLogger.getPerfLogger(); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE); + + boolean noName = StringUtils.isEmpty(conf + .getVar(HiveConf.ConfVars.HADOOPJOBNAME)); + int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + + String queryId = plan.getQueryId(); + String queryStr = plan.getQueryStr(); + + maxthreads = HiveConf.getIntVar(conf, + HiveConf.ConfVars.EXECPARALLETHREADNUMBER); + + try { + LOG.info("Starting command: " + queryStr); + + plan.setStarted(); + + if (SessionState.get() != null) { + SessionState + .get() + .getHiveHistory() + .startQuery(queryStr, + conf.getVar(HiveConf.ConfVars.HIVEQUERYID)); + SessionState.get().getHiveHistory().logPlanProgress(plan); + } + resStream = null; + + HookContext hookContext = new HookContext(plan, conf, + ctx.getPathToCS()); + hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK); + + for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) { + if (peh instanceof ExecuteWithHookContext) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + + peh.getClass().getName()); + + ((ExecuteWithHookContext) peh).run(hookContext); + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + + peh.getClass().getName()); + } else if (peh instanceof PreExecute) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + + peh.getClass().getName()); + + ((PreExecute) peh).run(SessionState.get(), + plan.getInputs(), plan.getOutputs(), ShimLoader + .getHadoopShims().getUGIForConf(conf)); + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + + peh.getClass().getName()); + } + } + + int jobs = Utilities.getMRTasks(plan.getRootTasks()).size() + + Utilities.getTezTasks(plan.getRootTasks()).size(); + if (jobs > 0) { + console.printInfo("Query ID = " + plan.getQueryId()); + console.printInfo("Total jobs = " + jobs); + } + if (SessionState.get() != null) { + SessionState + .get() + .getHiveHistory() + .setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, + String.valueOf(jobs)); + SessionState.get().getHiveHistory() + .setIdToTableMap(plan.getIdToTableNameMap()); + } + String jobname = Utilities.abbreviate(queryStr, maxlen - 6); + + // A runtime that launches runnable tasks as separate Threads + // through + // TaskRunners + // As soon as a task isRunnable, it is put in a queue + // At any time, at most maxthreads tasks can be running + // The main thread polls the TaskRunners to check if they have + // finished. 
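      // Editorial sketch (not part of this patch) of the scheduling loop that
      // follows, using the same names as the code below:
      //
      //   while (!destroyed && driverCxt.isRunning()) {
      //     while ((task = driverCxt.getRunnable(maxthreads)) != null) {
      //       launchTask(task, ...);                 // may start a TaskRunner thread
      //     }
      //     TaskRunner done = driverCxt.pollFinished();   // wait for a completion
      //     // inspect its exit code: retry, fall back to a backup task, or fail;
      //     // on success, add done.getTask()'s launchable children to runnable
      //   }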
+ + DriverContext driverCxt = new DriverContext(ctx); + driverCxt.prepare(plan); + + ctx.setHDFSCleanup(true); + + this.driverCxt = driverCxt; // for canceling the query (should be + // bound to session?) + + SessionState.get().setMapRedStats( + new LinkedHashMap()); + SessionState.get().setStackTraces( + new HashMap>>()); + SessionState.get().setLocalMapRedErrors( + new HashMap>()); + + // Add root Tasks to runnable + for (Task tsk : plan.getRootTasks()) { + // This should never happen, if it does, it's a bug with the + // potential to produce + // incorrect results. + assert tsk.getParentTasks() == null + || tsk.getParentTasks().isEmpty(); + driverCxt.addToRunnable(tsk); + } + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TIME_TO_SUBMIT); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS); + // Loop while you either have tasks running, or tasks queued up + while (!destroyed && driverCxt.isRunning()) { + + // Launch upto maxthreads tasks + Task task; + while ((task = driverCxt.getRunnable(maxthreads)) != null) { + perfLogger.PerfLogBegin( + CLASS_NAME, + PerfLogger.TASK + task.getName() + "." + + task.getId()); + TaskRunner runner = launchTask(task, queryId, noName, + jobname, jobs, driverCxt); + if (!runner.isRunning()) { + break; + } + } + + // poll the Tasks to see which one completed + TaskRunner tskRun = driverCxt.pollFinished(); + if (tskRun == null) { + continue; + } + hookContext.addCompleteTask(tskRun); + + Task tsk = tskRun.getTask(); + TaskResult result = tskRun.getTaskResult(); + + int exitVal = result.getExitVal(); + if (exitVal != 0) { + if (tsk.ifRetryCmdWhenFail()) { + driverCxt.shutdown(); + // in case we decided to run everything in local mode, + // restore the + // the jobtracker setting to its initial value + ctx.restoreOriginalTracker(); + throw new CommandNeedRetryException(); + } + Task backupTask = tsk + .getAndInitBackupTask(); + if (backupTask != null) { + setErrorMsgAndDetail(exitVal, result.getTaskError(), + tsk); + console.printError(errorMessage); + errorMessage = "ATTEMPT: Execute BackupTask: " + + backupTask.getClass().getName(); + console.printError(errorMessage); + + // add backup task to runnable + if (DriverContext.isLaunchable(backupTask)) { + driverCxt.addToRunnable(backupTask); + } + continue; + + } else { + hookContext + .setHookType(HookContext.HookType.ON_FAILURE_HOOK); + // Get all the failure execution hooks and execute them. 
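      // Editorial note (illustrative, not part of this patch): these hooks are
      // read from the comma-separated class names configured under
      // HiveConf.ConfVars.ONFAILUREHOOKS (hive.exec.failure.hooks), e.g.
      //
      //   set hive.exec.failure.hooks=com.example.MyFailureHook;
      //
      // where com.example.MyFailureHook is a hypothetical class implementing
      // ExecuteWithHookContext; unlike the pre-execute hooks above, the loop
      // below casts to ExecuteWithHookContext without an instanceof check.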
+ for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) { + perfLogger.PerfLogBegin(CLASS_NAME, + PerfLogger.FAILURE_HOOK + + ofh.getClass().getName()); + + ((ExecuteWithHookContext) ofh).run(hookContext); + + perfLogger.PerfLogEnd(CLASS_NAME, + PerfLogger.FAILURE_HOOK + + ofh.getClass().getName()); + } + setErrorMsgAndDetail(exitVal, result.getTaskError(), + tsk); + SQLState = "08S01"; + console.printError(errorMessage); + driverCxt.shutdown(); + // in case we decided to run everything in local mode, + // restore the + // the jobtracker setting to its initial value + ctx.restoreOriginalTracker(); + return exitVal; + } + } + + driverCxt.finished(tskRun); + + if (SessionState.get() != null) { + SessionState + .get() + .getHiveHistory() + .setTaskProperty(queryId, tsk.getId(), + Keys.TASK_RET_CODE, String.valueOf(exitVal)); + SessionState.get().getHiveHistory().endTask(queryId, tsk); + } + + if (tsk.getChildTasks() != null) { + for (Task child : tsk + .getChildTasks()) { + if (DriverContext.isLaunchable(child)) { + driverCxt.addToRunnable(child); + } + } + } + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS); + + // in case we decided to run everything in local mode, restore the + // the jobtracker setting to its initial value + ctx.restoreOriginalTracker(); + + if (driverCxt.isShutdown()) { + SQLState = "HY008"; + errorMessage = "FAILED: Operation cancelled"; + console.printError(errorMessage); + return 1000; + } + + // remove incomplete outputs. + // Some incomplete outputs may be added at the beginning, for eg: + // for dynamic partitions. + // remove them + HashSet remOutputs = new HashSet(); + for (WriteEntity output : plan.getOutputs()) { + if (!output.isComplete()) { + remOutputs.add(output); + } + } + + for (WriteEntity output : remOutputs) { + plan.getOutputs().remove(output); + } + + hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK); + // Get all the post execution hooks and execute them. + for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) { + if (peh instanceof ExecuteWithHookContext) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + + peh.getClass().getName()); + + ((ExecuteWithHookContext) peh).run(hookContext); + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + + peh.getClass().getName()); + } else if (peh instanceof PostExecute) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + + peh.getClass().getName()); + + ((PostExecute) peh) + .run(SessionState.get(), + plan.getInputs(), + plan.getOutputs(), + (SessionState.get() != null ? 
SessionState + .get().getLineageState() + .getLineageInfo() : null), + ShimLoader.getHadoopShims().getUGIForConf( + conf)); + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + + peh.getClass().getName()); + } + } + + if (SessionState.get() != null) { + SessionState + .get() + .getHiveHistory() + .setQueryProperty(queryId, Keys.QUERY_RET_CODE, + String.valueOf(0)); + SessionState.get().getHiveHistory().printRowCount(queryId); + } + } catch (CommandNeedRetryException e) { + throw e; + } catch (Exception e) { + ctx.restoreOriginalTracker(); + if (SessionState.get() != null) { + SessionState + .get() + .getHiveHistory() + .setQueryProperty(queryId, Keys.QUERY_RET_CODE, + String.valueOf(12)); + } + // TODO: do better with handling types of Exception here + errorMessage = "FAILED: Hive Internal Error: " + + Utilities.getNameMessage(e); + SQLState = "08S01"; + downstreamError = e; + console.printError(errorMessage + "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return (12); + } finally { + if (SessionState.get() != null) { + SessionState.get().getHiveHistory().endQuery(queryId); + } + if (noName) { + conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, ""); + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE); + + Map stats = SessionState.get() + .getMapRedStats(); + if (stats != null && !stats.isEmpty()) { + long totalCpu = 0; + console.printInfo("MapReduce Jobs Launched: "); + for (Map.Entry entry : stats.entrySet()) { + console.printInfo("Stage-" + entry.getKey() + ": " + + entry.getValue()); + totalCpu += entry.getValue().getCpuMSec(); + } + console.printInfo("Total MapReduce CPU Time Spent: " + + Utilities.formatMsecToStr(totalCpu)); + } + } + plan.setDone(); + + if (SessionState.get() != null) { + try { + SessionState.get().getLineageState().clear(); + SessionState.get().getHiveHistory().logPlanProgress(plan); + } catch (Exception e) { + // ignore + } + } + console.printInfo("OK"); + + return (0); + } + + private void setErrorMsgAndDetail(int exitVal, Throwable downstreamError, + Task tsk) { + this.downstreamError = downstreamError; + errorMessage = "FAILED: Execution Error, return code " + exitVal + + " from " + tsk.getClass().getName(); + if (downstreamError != null) { + // here we assume that upstream code may have parametrized the msg + // from ErrorMsg + // so we want to keep it + errorMessage += ". " + downstreamError.getMessage(); + } else { + ErrorMsg em = ErrorMsg.getErrorMsg(exitVal); + if (em != null) { + errorMessage += ". 
" + em.getMsg(); + } + } + } + + /** + * Launches a new task + * + * @param tsk + * task being launched + * @param queryId + * Id of the query containing the task + * @param noName + * whether the task has a name set + * @param jobname + * name of the task, if it is a map-reduce job + * @param jobs + * number of map-reduce jobs + * @param cxt + * the driver context + */ + private TaskRunner launchTask(Task tsk, + String queryId, boolean noName, String jobname, int jobs, + DriverContext cxt) throws HiveException { + if (SessionState.get() != null) { + SessionState.get().getHiveHistory() + .startTask(queryId, tsk, tsk.getClass().getName()); + } + if (tsk.isMapRedTask() && !(tsk instanceof ConditionalTask)) { + if (noName) { + conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname + "(" + + tsk.getId() + ")"); + } + conf.set("mapreduce.workflow.node.name", tsk.getId()); + Utilities.setWorkflowAdjacencies(conf, plan); + cxt.incCurJobNo(1); + console.printInfo("Launching Job " + cxt.getCurJobNo() + " out of " + + jobs); + } + tsk.initialize(conf, plan, cxt); + TaskResult tskRes = new TaskResult(); + TaskRunner tskRun = new TaskRunner(tsk, tskRes); + + cxt.launching(tskRun); + // Launch Task + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.EXECPARALLEL) + && tsk.isMapRedTask()) { + // Launch it in the parallel mode, as a separate thread only for MR + // tasks + tskRun.start(); + } else { + tskRun.runSequential(); + } + return tskRun; + } + + public boolean isFetchingTable() { + return plan != null && plan.getFetchTask() != null; + } + + @SuppressWarnings("unchecked") + public boolean getResults(List res) throws IOException, + CommandNeedRetryException { + if (destroyed) { + throw new IOException("FAILED: Operation cancelled"); + } + if (isFetchingTable()) { + FetchTask ft = plan.getFetchTask(); + ft.setMaxRows(maxRows); + return ft.fetch(res); + } + + if (resStream == null) { + resStream = ctx.getStream(); + } + if (resStream == null) { + return false; + } + + int numRows = 0; + String row = null; + + while (numRows < maxRows) { + if (resStream == null) { + if (numRows > 0) { + return true; + } else { + return false; + } + } + + bos.reset(); + Utilities.StreamStatus ss; + try { + ss = Utilities.readColumn(resStream, bos); + if (bos.getLength() > 0) { + row = new String(bos.getData(), 0, bos.getLength(), "UTF-8"); + } else if (ss == Utilities.StreamStatus.TERMINATED) { + row = new String(); + } + + if (row != null) { + numRows++; + res.add(row); + } + row = null; + } catch (IOException e) { + console.printError("FAILED: Unexpected IO exception : " + + e.getMessage()); + return false; + } + + if (ss == Utilities.StreamStatus.EOF) { + resStream = ctx.getStream(); + } + } + return true; + } + + public void resetFetch() throws IOException { + if (plan != null && plan.getFetchTask() != null) { + try { + plan.getFetchTask().clearFetch(); + } catch (Exception e) { + throw new IOException("Error closing the current fetch task", e); + } + plan.getFetchTask().initialize(conf, plan, null); + } else { + ctx.resetStream(); + resStream = null; + } + } + + public int getTryCount() { + return tryCount; + } + + public void setTryCount(int tryCount) { + this.tryCount = tryCount; + } + + public int close() { + try { + if (plan != null) { + FetchTask fetchTask = plan.getFetchTask(); + if (null != fetchTask) { + try { + fetchTask.clearFetch(); + } catch (Exception e) { + LOG.debug(" Exception while clearing the Fetch task ", + e); + } + } + } + if (driverCxt != null) { + driverCxt.shutdown(); + driverCxt = null; + } + 
if (ctx != null) { + ctx.clear(); + } + if (null != resStream) { + try { + ((FSDataInputStream) resStream).close(); + } catch (Exception e) { + LOG.debug(" Exception while closing the resStream ", e); + } + } + } catch (Exception e) { + console.printError("FAILED: Hive Internal Error: " + + Utilities.getNameMessage(e) + "\n" + + org.apache.hadoop.util.StringUtils.stringifyException(e)); + return 13; + } + + return 0; + } + + public void destroy() { + if (destroyed) { + return; + } + destroyed = true; + if (ctx != null) { + try { + releaseLocks(ctx.getHiveLocks()); + } catch (LockException e) { + LOG.warn("Exception when releasing locking in destroy: " + + e.getMessage()); + } + } + if (txnMgr != null) { + txnMgr.closeTxnManager(); + } + } + + public org.apache.hadoop.hive.ql.plan.api.Query getQueryPlan() + throws IOException { + return plan.getQueryPlan(); + } + + public String getErrorMsg() { + return errorMessage; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java index 94afaba..4e910b9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java @@ -22,6 +22,8 @@ import java.io.Serializable; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; import org.apache.commons.logging.Log; @@ -65,319 +67,345 @@ * ColumnStatsTask implementation. **/ -public class ColumnStatsTask extends Task implements Serializable { - private static final long serialVersionUID = 1L; - private FetchOperator ftOp; - private static transient final Log LOG = LogFactory.getLog(ColumnStatsTask.class); - - public ColumnStatsTask() { - super(); - } - - @Override - public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) { - super.initialize(conf, queryPlan, ctx); - work.initializeForFetch(); - try { - JobConf job = new JobConf(conf); - ftOp = new FetchOperator(work.getfWork(), job); - } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); - throw new RuntimeException(e); - } - } - - private void unpackBooleanStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - long v = ((LongObjectInspector) oi).get(o); - if (fName.equals("counttrues")) { - statsObj.getStatsData().getBooleanStats().setNumTrues(v); - } else if (fName.equals("countfalses")) { - statsObj.getStatsData().getBooleanStats().setNumFalses(v); - } else if (fName.equals("countnulls")) { - statsObj.getStatsData().getBooleanStats().setNumNulls(v); - } - } - - private void unpackDoubleStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - if (fName.equals("countnulls")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getDoubleStats().setNumNulls(v); - } else if (fName.equals("numdistinctvalues")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getDoubleStats().setNumDVs(v); - } else if (fName.equals("max")) { - double d = ((DoubleObjectInspector) oi).get(o); - statsObj.getStatsData().getDoubleStats().setHighValue(d); - } else if (fName.equals("min")) { - double d = ((DoubleObjectInspector) oi).get(o); - statsObj.getStatsData().getDoubleStats().setLowValue(d); - } - } - - private void unpackDecimalStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - if (fName.equals("countnulls")) { - long v = ((LongObjectInspector) oi).get(o); - 
statsObj.getStatsData().getDecimalStats().setNumNulls(v); - } else if (fName.equals("numdistinctvalues")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getDecimalStats().setNumDVs(v); - } else if (fName.equals("max")) { - HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o); - statsObj.getStatsData().getDecimalStats().setHighValue(convertToThriftDecimal(d)); - } else if (fName.equals("min")) { - HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o); - statsObj.getStatsData().getDecimalStats().setLowValue(convertToThriftDecimal(d)); - } - } - - private Decimal convertToThriftDecimal(HiveDecimal d) { - return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), (short)d.scale()); - } - - private void unpackLongStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - if (fName.equals("countnulls")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getLongStats().setNumNulls(v); - } else if (fName.equals("numdistinctvalues")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getLongStats().setNumDVs(v); - } else if (fName.equals("max")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getLongStats().setHighValue(v); - } else if (fName.equals("min")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getLongStats().setLowValue(v); - } - } - - private void unpackStringStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - if (fName.equals("countnulls")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getStringStats().setNumNulls(v); - } else if (fName.equals("numdistinctvalues")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getStringStats().setNumDVs(v); - } else if (fName.equals("avglength")) { - double d = ((DoubleObjectInspector) oi).get(o); - statsObj.getStatsData().getStringStats().setAvgColLen(d); - } else if (fName.equals("maxlength")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getStringStats().setMaxColLen(v); - } - } - - private void unpackBinaryStats(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj statsObj) { - if (fName.equals("countnulls")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getBinaryStats().setNumNulls(v); - } else if (fName.equals("avglength")) { - double d = ((DoubleObjectInspector) oi).get(o); - statsObj.getStatsData().getBinaryStats().setAvgColLen(d); - } else if (fName.equals("maxlength")) { - long v = ((LongObjectInspector) oi).get(o); - statsObj.getStatsData().getBinaryStats().setMaxColLen(v); - } - } - - private void unpackPrimitiveObject (ObjectInspector oi, Object o, String fieldName, - ColumnStatisticsObj statsObj) { - if (o == null) { - return; - } - // First infer the type of object - if (fieldName.equals("columntype")) { - PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi; - String s = ((StringObjectInspector) poi).getPrimitiveJavaObject(o); - ColumnStatisticsData statsData = new ColumnStatisticsData(); - - if (s.equalsIgnoreCase("long")) { - LongColumnStatsData longStats = new LongColumnStatsData(); - statsData.setLongStats(longStats); - statsObj.setStatsData(statsData); - } else if (s.equalsIgnoreCase("double")) { - DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); - statsData.setDoubleStats(doubleStats); - statsObj.setStatsData(statsData); - } else if 
(s.equalsIgnoreCase("string")) { - StringColumnStatsData stringStats = new StringColumnStatsData(); - statsData.setStringStats(stringStats); - statsObj.setStatsData(statsData); - } else if (s.equalsIgnoreCase("boolean")) { - BooleanColumnStatsData booleanStats = new BooleanColumnStatsData(); - statsData.setBooleanStats(booleanStats); - statsObj.setStatsData(statsData); - } else if (s.equalsIgnoreCase("binary")) { - BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - statsData.setBinaryStats(binaryStats); - statsObj.setStatsData(statsData); - } else if (s.equalsIgnoreCase("decimal")) { - DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); - statsData.setDecimalStats(decimalStats); - statsObj.setStatsData(statsData); - } - } else { - // invoke the right unpack method depending on data type of the column - if (statsObj.getStatsData().isSetBooleanStats()) { - unpackBooleanStats(oi, o, fieldName, statsObj); - } else if (statsObj.getStatsData().isSetLongStats()) { - unpackLongStats(oi, o, fieldName, statsObj); - } else if (statsObj.getStatsData().isSetDoubleStats()) { - unpackDoubleStats(oi,o,fieldName, statsObj); - } else if (statsObj.getStatsData().isSetStringStats()) { - unpackStringStats(oi, o, fieldName, statsObj); - } else if (statsObj.getStatsData().isSetBinaryStats()) { - unpackBinaryStats(oi, o, fieldName, statsObj); - } else if (statsObj.getStatsData().isSetDecimalStats()) { - unpackDecimalStats(oi, o, fieldName, statsObj); - } - } - } - - private void unpackStructObject(ObjectInspector oi, Object o, String fName, - ColumnStatisticsObj cStatsObj) { - if (oi.getCategory() != ObjectInspector.Category.STRUCT) { - throw new RuntimeException("Invalid object datatype : " + oi.getCategory().toString()); - } - - StructObjectInspector soi = (StructObjectInspector) oi; - List fields = soi.getAllStructFieldRefs(); - List list = soi.getStructFieldsDataAsList(o); - - for (int i = 0; i < fields.size(); i++) { - // Get the field objectInspector, fieldName and the field object. - ObjectInspector foi = fields.get(i).getFieldObjectInspector(); - Object f = (list == null ? null : list.get(i)); - String fieldName = fields.get(i).getFieldName(); - - if (foi.getCategory() == ObjectInspector.Category.PRIMITIVE) { - unpackPrimitiveObject(foi, f, fieldName, cStatsObj); - } else { - unpackStructObject(foi, f, fieldName, cStatsObj); - } - } - } - - private List constructColumnStatsFromPackedRows() throws HiveException, MetaException, IOException { - - String dbName = SessionState.get().getCurrentDatabase(); - String tableName = work.getColStats().getTableName(); - String partName = null; - List colName = work.getColStats().getColName(); - List colType = work.getColStats().getColType(); - boolean isTblLevel = work.getColStats().isTblLevel(); - - List stats = new ArrayList(); - InspectableObject packedRow; - while ((packedRow = ftOp.getNextRow()) != null) { - if (packedRow.oi.getCategory() != ObjectInspector.Category.STRUCT) { - throw new HiveException("Unexpected object type encountered while unpacking row"); - } - - List statsObjs = new ArrayList(); - StructObjectInspector soi = (StructObjectInspector) packedRow.oi; - List fields = soi.getAllStructFieldRefs(); - List list = soi.getStructFieldsDataAsList(packedRow.o); - - Table tbl = db.getTable(dbName,tableName); - List partColSchema = tbl.getPartCols(); - // Partition columns are appended at end, we only care about stats column - int numOfStatCols = isTblLevel ? 
fields.size() : fields.size() - partColSchema.size(); - for (int i = 0; i < numOfStatCols; i++) { - // Get the field objectInspector, fieldName and the field object. - ObjectInspector foi = fields.get(i).getFieldObjectInspector(); - Object f = (list == null ? null : list.get(i)); - String fieldName = fields.get(i).getFieldName(); - ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); - statsObj.setColName(colName.get(i)); - statsObj.setColType(colType.get(i)); - unpackStructObject(foi, f, fieldName, statsObj); - statsObjs.add(statsObj); - } - - if (!isTblLevel) { - List partVals = new ArrayList(); - // Iterate over partition columns to figure out partition name - for (int i = fields.size() - partColSchema.size(); i < fields.size(); i++) { - partVals.add(((PrimitiveObjectInspector)fields.get(i).getFieldObjectInspector()). - getPrimitiveJavaObject(list.get(i)).toString()); - } - partName = Warehouse.makePartName(partColSchema, partVals); - } - - ColumnStatisticsDesc statsDesc = getColumnStatsDesc(dbName, tableName, partName, isTblLevel); - ColumnStatistics colStats = new ColumnStatistics(); - colStats.setStatsDesc(statsDesc); - colStats.setStatsObj(statsObjs); - stats.add(colStats); - } - ftOp.clearFetchContext(); - return stats; - } - - private ColumnStatisticsDesc getColumnStatsDesc(String dbName, String tableName, - String partName, boolean isTblLevel) - { - ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); - statsDesc.setDbName(dbName); - statsDesc.setTableName(tableName); - statsDesc.setIsTblLevel(isTblLevel); - - if (!isTblLevel) { - statsDesc.setPartName(partName); - } else { - statsDesc.setPartName(null); - } - return statsDesc; - } - - private int persistPartitionStats() throws HiveException, MetaException, IOException { - - // Fetch result of the analyze table partition (p1=c1).. compute statistics for columns .. - // Construct a column statistics object from the result - List colStats = constructColumnStatsFromPackedRows(); - // Persist the column statistics object to the metastore - for (ColumnStatistics colStat : colStats) { - db.updatePartitionColumnStatistics(colStat); - } - return 0; - } - - private int persistTableStats() throws HiveException, MetaException, IOException { - // Fetch result of the analyze table .. compute statistics for columns .. 
- // Construct a column statistics object from the result - ColumnStatistics colStats = constructColumnStatsFromPackedRows().get(0); - // Persist the column statistics object to the metastore - db.updateTableColumnStatistics(colStats); - return 0; - } - - @Override - public int execute(DriverContext driverContext) { - try { - if (work.getColStats().isTblLevel()) { - return persistTableStats(); - } else { - return persistPartitionStats(); - } - } catch (Exception e) { - LOG.info(e); - } - return 1; - } - - @Override - public StageType getType() { - return StageType.COLUMNSTATS; - } - - @Override - public String getName() { - return "COLUMNSTATS TASK"; - } +public class ColumnStatsTask extends Task implements + Serializable { + private static final long serialVersionUID = 1L; + private FetchOperator ftOp; + private static transient final Log LOG = LogFactory + .getLog(ColumnStatsTask.class); + + public ColumnStatsTask() { + super(); + } + + @Override + public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) { + super.initialize(conf, queryPlan, ctx); + work.initializeForFetch(); + try { + JobConf job = new JobConf(conf); + ftOp = new FetchOperator(work.getfWork(), job); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + + private void unpackBooleanStats(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj statsObj) { + long v = ((LongObjectInspector) oi).get(o); + if (fName.equals("counttrues")) { + + statsObj.getStatsData().getBooleanStats().setNumTrues(v); + } else if (fName.equals("countfalses")) { + + statsObj.getStatsData().getBooleanStats().setNumFalses(v); + } else if (fName.equals("countnulls")) { + + statsObj.getStatsData().getBooleanStats().setNumNulls(v); + } + + } + + private void unpackDoubleStats(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj statsObj) { + if (fName.equals("countnulls")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getDoubleStats().setNumNulls(v); + } else if (fName.equals("numdistinctvalues")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getDoubleStats().setNumDVs(v); + } else if (fName.equals("max")) { + double d = ((DoubleObjectInspector) oi).get(o); + statsObj.getStatsData().getDoubleStats().setHighValue(d); + } else if (fName.equals("min")) { + double d = ((DoubleObjectInspector) oi).get(o); + statsObj.getStatsData().getDoubleStats().setLowValue(d); + } + } + + private void unpackDecimalStats(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj statsObj) { + if (fName.equals("countnulls")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getDecimalStats().setNumNulls(v); + } else if (fName.equals("numdistinctvalues")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getDecimalStats().setNumDVs(v); + } else if (fName.equals("max")) { + HiveDecimal d = ((HiveDecimalObjectInspector) oi) + .getPrimitiveJavaObject(o); + statsObj.getStatsData().getDecimalStats() + .setHighValue(convertToThriftDecimal(d)); + } else if (fName.equals("min")) { + HiveDecimal d = ((HiveDecimalObjectInspector) oi) + .getPrimitiveJavaObject(o); + statsObj.getStatsData().getDecimalStats() + .setLowValue(convertToThriftDecimal(d)); + } + } + + private Decimal convertToThriftDecimal(HiveDecimal d) { + return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), + (short) d.scale()); + } + + private void unpackLongStats(ObjectInspector oi, 
Object o, String fName, + ColumnStatisticsObj statsObj) { + if (fName.equals("countnulls")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getLongStats().setNumNulls(v); + } else if (fName.equals("numdistinctvalues")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getLongStats().setNumDVs(v); + } else if (fName.equals("max")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getLongStats().setHighValue(v); + } else if (fName.equals("min")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getLongStats().setLowValue(v); + } + } + + private void unpackStringStats(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj statsObj) { + if (fName.equals("countnulls")) { + long v = ((LongObjectInspector) oi).get(o); + + statsObj.getStatsData().getStringStats().setNumNulls(v); + } else if (fName.equals("numdistinctvalues")) { + long v = ((LongObjectInspector) oi).get(o); + + statsObj.getStatsData().getStringStats().setNumDVs(v); + } else if (fName.equals("avglength")) { + double d = ((DoubleObjectInspector) oi).get(o); + + statsObj.getStatsData().getStringStats().setAvgColLen(d); + } else if (fName.equals("maxlength")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getStringStats().setMaxColLen(v); + } + } + + private void unpackBinaryStats(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj statsObj) { + if (fName.equals("countnulls")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getBinaryStats().setNumNulls(v); + } else if (fName.equals("avglength")) { + double d = ((DoubleObjectInspector) oi).get(o); + statsObj.getStatsData().getBinaryStats().setAvgColLen(d); + } else if (fName.equals("maxlength")) { + long v = ((LongObjectInspector) oi).get(o); + statsObj.getStatsData().getBinaryStats().setMaxColLen(v); + } + } + + private void unpackPrimitiveObject(ObjectInspector oi, Object o, + String fieldName, ColumnStatisticsObj statsObj) { + if (o == null) { + return; + } + // First infer the type of object + if (fieldName.equals("columntype")) { + PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi; + String s = ((StringObjectInspector) poi).getPrimitiveJavaObject(o); + ColumnStatisticsData statsData = new ColumnStatisticsData(); + if (s.equalsIgnoreCase("long")) { + LongColumnStatsData longStats = new LongColumnStatsData(); + statsData.setLongStats(longStats); + statsObj.setStatsData(statsData); + } else if (s.equalsIgnoreCase("double")) { + DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); + statsData.setDoubleStats(doubleStats); + statsObj.setStatsData(statsData); + } else if (s.equalsIgnoreCase("string")) { + StringColumnStatsData stringStats = new StringColumnStatsData(); + statsData.setStringStats(stringStats); + statsObj.setStatsData(statsData); + } else if (s.equalsIgnoreCase("boolean")) { + BooleanColumnStatsData booleanStats = new BooleanColumnStatsData(); + statsData.setBooleanStats(booleanStats); + statsObj.setStatsData(statsData); + } else if (s.equalsIgnoreCase("binary")) { + BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); + statsData.setBinaryStats(binaryStats); + statsObj.setStatsData(statsData); + } else if (s.equalsIgnoreCase("decimal")) { + DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); + statsData.setDecimalStats(decimalStats); + statsObj.setStatsData(statsData); + } + } else { + // invoke the right unpack method depending on data type of the + 
// column + if (statsObj.getStatsData().isSetBooleanStats()) { + unpackBooleanStats(oi, o, fieldName, statsObj); + } else if (statsObj.getStatsData().isSetLongStats()) { + unpackLongStats(oi, o, fieldName, statsObj); + } else if (statsObj.getStatsData().isSetDoubleStats()) { + unpackDoubleStats(oi, o, fieldName, statsObj); + } else if (statsObj.getStatsData().isSetStringStats()) { + unpackStringStats(oi, o, fieldName, statsObj); + } else if (statsObj.getStatsData().isSetBinaryStats()) { + unpackBinaryStats(oi, o, fieldName, statsObj); + } else if (statsObj.getStatsData().isSetDecimalStats()) { + unpackDecimalStats(oi, o, fieldName, statsObj); + } + } + } + + private void unpackStructObject(ObjectInspector oi, Object o, String fName, + ColumnStatisticsObj cStatsObj) { + if (oi.getCategory() != ObjectInspector.Category.STRUCT) { + throw new RuntimeException("Invalid object datatype : " + + oi.getCategory().toString()); + } + + StructObjectInspector soi = (StructObjectInspector) oi; + List fields = soi.getAllStructFieldRefs(); + List list = soi.getStructFieldsDataAsList(o); + + for (int i = 0; i < fields.size(); i++) { + // Get the field objectInspector, fieldName and the field object. + ObjectInspector foi = fields.get(i).getFieldObjectInspector(); + Object f = (list == null ? null : list.get(i)); + String fieldName = fields.get(i).getFieldName(); + + if (foi.getCategory() == ObjectInspector.Category.PRIMITIVE) { + unpackPrimitiveObject(foi, f, fieldName, cStatsObj); + } else { + unpackStructObject(foi, f, fieldName, cStatsObj); + } + } + } + + private List constructColumnStatsFromPackedRows() + throws HiveException, MetaException, IOException { + + String dbName = SessionState.get().getCurrentDatabase(); + String tableName = work.getColStats().getTableName(); + String partName = null; + List colName = work.getColStats().getColName(); + List colType = work.getColStats().getColType(); + boolean isTblLevel = work.getColStats().isTblLevel(); + + List stats = new ArrayList(); + InspectableObject packedRow; + while ((packedRow = ftOp.getNextRow()) != null) { + if (packedRow.oi.getCategory() != ObjectInspector.Category.STRUCT) { + throw new HiveException( + "Unexpected object type encountered while unpacking row"); + } + + List statsObjs = new ArrayList(); + StructObjectInspector soi = (StructObjectInspector) packedRow.oi; + List fields = soi.getAllStructFieldRefs(); + List list = soi.getStructFieldsDataAsList(packedRow.o); + + Table tbl = db.getTable(dbName, tableName); + List partColSchema = tbl.getPartCols(); + // Partition columns are appended at end, we only care about stats + // column + int numOfStatCols = isTblLevel ? fields.size() : fields.size() + - partColSchema.size(); + for (int i = 0; i < numOfStatCols; i++) { + // Get the field objectInspector, fieldName and the field + // object. + ObjectInspector foi = fields.get(i).getFieldObjectInspector(); + Object f = (list == null ? 
null : list.get(i)); + String fieldName = fields.get(i).getFieldName(); + ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); + statsObj.setColName(colName.get(i)); + statsObj.setColType(colType.get(i)); + unpackStructObject(foi, f, fieldName, statsObj); + statsObjs.add(statsObj); + } + + if (!isTblLevel) { + List partVals = new ArrayList(); + // Iterate over partition columns to figure out partition name + for (int i = fields.size() - partColSchema.size(); i < fields + .size(); i++) { + partVals.add(((PrimitiveObjectInspector) fields.get(i) + .getFieldObjectInspector()).getPrimitiveJavaObject( + list.get(i)).toString()); + } + partName = Warehouse.makePartName(partColSchema, partVals); + } + + ColumnStatisticsDesc statsDesc = getColumnStatsDesc(dbName, + tableName, partName, isTblLevel); + ColumnStatistics colStats = new ColumnStatistics(); + colStats.setStatsDesc(statsDesc); + colStats.setStatsObj(statsObjs); + stats.add(colStats); + } + ftOp.clearFetchContext(); + return stats; + } + + private ColumnStatisticsDesc getColumnStatsDesc(String dbName, + String tableName, String partName, boolean isTblLevel) { + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setDbName(dbName); + statsDesc.setTableName(tableName); + statsDesc.setIsTblLevel(isTblLevel); + + if (!isTblLevel) { + statsDesc.setPartName(partName); + } else { + statsDesc.setPartName(null); + } + return statsDesc; + } + + private int persistPartitionStats() throws HiveException, MetaException, + IOException { + + // Fetch result of the analyze table partition (p1=c1).. compute + // statistics for columns .. + // Construct a column statistics object from the result + List colStats = constructColumnStatsFromPackedRows(); + // Persist the column statistics object to the metastore + for (ColumnStatistics colStat : colStats) { + db.updatePartitionColumnStatistics(colStat); + } + return 0; + } + + private int persistTableStats() throws HiveException, MetaException, + IOException { + // Fetch result of the analyze table .. compute statistics for columns + // .. 
+ // Construct a column statistics object from the result + ColumnStatistics colStats = constructColumnStatsFromPackedRows().get(0); + // Persist the column statistics object to the metastore + db.updateTableColumnStatistics(colStats); + return 0; + } + + @Override + public int execute(DriverContext driverContext) { + try { + if (work.getColStats().isTblLevel()) { + return persistTableStats(); + } else { + return persistPartitionStats(); + } + } catch (Exception e) { + LOG.info(e); + } + return 1; + } + + @Override + public StageType getType() { + return StageType.COLUMNSTATS; + } + + @Override + public String getName() { + return "COLUMNSTATS TASK"; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index 8d56c35..b124907 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; @@ -236,9 +237,13 @@ private ColumnStatistics constructColumnStatsFromInput() { } else if (fName.equals("numDVs")) { decimalStats.setNumDVs(Long.parseLong(value)); } else if (fName.equals("lowValue")) { - // decimalStats.setLowValue(new Decimal(value)); + BigDecimal d = new BigDecimal(value); + decimalStats.setLowValue(new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), + (short) d.scale())); } else if (fName.equals("highValue")) { - // decimalStats.setHighValue(new Decimal(value)); + BigDecimal d = new BigDecimal(value); + decimalStats.setHighValue(new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), + (short) d.scale())); } } statsData.setDecimalStats(decimalStats); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index ad6e19c..e1e3bab 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork; import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanTask; import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork; +import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.ColumnStatsWork; import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.CopyWork; @@ -92,6 +93,7 @@ public TaskTuple(Class workClass, Class> taskClass) { StatsTask.class)); taskvec.add(new TaskTuple(StatsNoJobWork.class, StatsNoJobTask.class)); taskvec.add(new TaskTuple(ColumnStatsWork.class, ColumnStatsTask.class)); + taskvec.add(new TaskTuple(ColumnStatsUpdateWork.class, ColumnStatsUpdateTask.class)); taskvec.add(new TaskTuple(MergeWork.class, BlockMergeTask.class)); taskvec.add(new TaskTuple(DependencyCollectionWork.class, diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java index e83bc17..2b746ff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java @@ -112,6 +112,7 @@ public int execute(DriverContext driverContext) { LOG.debug("Task: " + getId() + ", Summary: " + totalInputFileSize + "," + totalInputNumFiles + "," + numReducers); + } String reason = MapRedTask.isEligibleForLocalMode(conf, numReducers, @@ -121,9 +122,11 @@ public int execute(DriverContext driverContext) { cloneConf(); ShimLoader.getHadoopShims().setJobLauncherRpcAddress(conf, "local"); console.printInfo("Selecting local mode for task: " + getId()); + this.setLocalMode(true); } else { console.printInfo("Cannot run job locally: " + reason); + this.setLocalMode(false); } } @@ -173,6 +176,7 @@ public int execute(DriverContext driverContext) { OutputStream out = FileSystem.getLocal(conf).create(planPath); MapredWork plan = getWork(); LOG.info("Generating plan file " + planPath.toString()); + Utilities.serializePlan(plan, out, conf); String isSilent = "true".equalsIgnoreCase(System @@ -205,6 +209,7 @@ public int execute(DriverContext driverContext) { } LOG.info("Executing: " + cmdLine); + // Inherit Java system variables String hadoopOpts; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 37b1669..5caab45 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -14,7 +14,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf;; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index c0322fb..d779ee1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -25,6 +25,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -57,6 +58,8 @@ import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; +import org.apache.hadoop.hive.ql.exec.ColumnStatsTask; +import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; @@ -90,6 +93,9 @@ import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; +import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; +import org.apache.hadoop.hive.ql.plan.ColumnStatsWork; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; @@ -156,3230 +162,3580 @@ * */ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { - private static final Log LOG = 
LogFactory.getLog(DDLSemanticAnalyzer.class); - private static final Map TokenToTypeName = new HashMap(); - - private final Set reservedPartitionValues; - private final HiveAuthorizationTaskFactory hiveAuthorizationTaskFactory; - - static { - TokenToTypeName.put(HiveParser.TOK_BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_TINYINT, serdeConstants.TINYINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_SMALLINT, serdeConstants.SMALLINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_INT, serdeConstants.INT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_BIGINT, serdeConstants.BIGINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_FLOAT, serdeConstants.FLOAT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DOUBLE, serdeConstants.DOUBLE_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_STRING, serdeConstants.STRING_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_CHAR, serdeConstants.CHAR_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_VARCHAR, serdeConstants.VARCHAR_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_BINARY, serdeConstants.BINARY_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME); - } - - public static String getTypeName(ASTNode node) throws SemanticException { - int token = node.getType(); - String typeName; - - // datetime type isn't currently supported - if (token == HiveParser.TOK_DATETIME) { - throw new SemanticException(ErrorMsg.UNSUPPORTED_TYPE.getMsg()); - } - - switch (token) { - case HiveParser.TOK_CHAR: - CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(node); - typeName = charTypeInfo.getQualifiedName(); - break; - case HiveParser.TOK_VARCHAR: - VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(node); - typeName = varcharTypeInfo.getQualifiedName(); - break; - case HiveParser.TOK_DECIMAL: - DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(node); - typeName = decTypeInfo.getQualifiedName(); - break; - default: - typeName = TokenToTypeName.get(token); - } - return typeName; - } - - static class TablePartition { - String tableName; - HashMap partSpec = null; - - public TablePartition() { - } - - public TablePartition(ASTNode tblPart) throws SemanticException { - tableName = unescapeIdentifier(tblPart.getChild(0).getText()); - if (tblPart.getChildCount() > 1) { - ASTNode part = (ASTNode) tblPart.getChild(1); - if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) { - this.partSpec = DDLSemanticAnalyzer.getPartSpec(part); - } - } - } - } - - public DDLSemanticAnalyzer(HiveConf conf) throws SemanticException { - this(conf, createHiveDB(conf)); - } - - public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { - super(conf, db); - reservedPartitionValues = new HashSet(); - // Partition can't have this name - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME)); - // Partition value can't end in this suffix - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED)); - 
hiveAuthorizationTaskFactory = new HiveAuthorizationTaskFactoryImpl(conf, db); - } - - @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { - - switch (ast.getToken().getType()) { - case HiveParser.TOK_ALTERTABLE_PARTITION: { - ASTNode tablePart = (ASTNode) ast.getChild(0); - TablePartition tblPart = new TablePartition(tablePart); - String tableName = tblPart.tableName; - HashMap partSpec = tblPart.partSpec; - ast = (ASTNode) ast.getChild(1); - if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { - analyzeAlterTableFileFormat(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) { - analyzeAlterTableProtectMode(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { - analyzeAlterTableLocation(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { - analyzeAlterTablePartMergeFiles(tablePart, ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) { - analyzeAlterTableSerde(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { - analyzeAlterTableSerdeProps(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { - analyzeAlterTableRenamePart(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION) { - analyzeAlterTableSkewedLocation(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) { - analyzeAlterTableBucketNum(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { - analyzeAlterTableClusterSort(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_COMPACT) { - analyzeAlterTableCompact(ast, tableName, partSpec); - } - break; - } - case HiveParser.TOK_DROPTABLE: - analyzeDropTable(ast, false); - break; - case HiveParser.TOK_TRUNCATETABLE: - analyzeTruncateTable(ast); - break; - case HiveParser.TOK_CREATEINDEX: - analyzeCreateIndex(ast); - break; - case HiveParser.TOK_DROPINDEX: - analyzeDropIndex(ast); - break; - case HiveParser.TOK_DESCTABLE: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeDescribeTable(ast); - break; - case HiveParser.TOK_SHOWDATABASES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowDatabases(ast); - break; - case HiveParser.TOK_SHOWTABLES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTables(ast); - break; - case HiveParser.TOK_SHOWCOLUMNS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowColumns(ast); - break; - case HiveParser.TOK_SHOW_TABLESTATUS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTableStatus(ast); - break; - case HiveParser.TOK_SHOW_TBLPROPERTIES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTableProperties(ast); - break; - case HiveParser.TOK_SHOWFUNCTIONS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowFunctions(ast); - break; - case HiveParser.TOK_SHOWLOCKS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowLocks(ast); - break; - case HiveParser.TOK_SHOWDBLOCKS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowDbLocks(ast); - break; - case HiveParser.TOK_SHOW_COMPACTIONS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowCompactions(ast); - break; - case HiveParser.TOK_SHOW_TRANSACTIONS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowTxns(ast); - 
break; - case HiveParser.TOK_SHOWCONF: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowConf(ast); - break; - case HiveParser.TOK_DESCFUNCTION: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeDescFunction(ast); - break; - case HiveParser.TOK_DESCDATABASE: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeDescDatabase(ast); - break; - case HiveParser.TOK_MSCK: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeMetastoreCheck(ast); - break; - case HiveParser.TOK_DROPVIEW: - analyzeDropTable(ast, true); - break; - case HiveParser.TOK_ALTERVIEW_PROPERTIES: - analyzeAlterTableProps(ast, true, false); - break; - case HiveParser.TOK_DROPVIEW_PROPERTIES: - analyzeAlterTableProps(ast, true, true); - break; - case HiveParser.TOK_ALTERVIEW_ADDPARTS: - // for ALTER VIEW ADD PARTITION, we wrapped the ADD to discriminate - // view from table; unwrap it now - analyzeAlterTableAddParts((ASTNode) ast.getChild(0), true); - break; - case HiveParser.TOK_ALTERVIEW_DROPPARTS: - // for ALTER VIEW DROP PARTITION, we wrapped the DROP to discriminate - // view from table; unwrap it now - analyzeAlterTableDropParts((ASTNode) ast.getChild(0), true); - break; - case HiveParser.TOK_ALTERVIEW_RENAME: - // for ALTER VIEW RENAME, we wrapped the RENAME to discriminate - // view from table; unwrap it now - analyzeAlterTableRename(((ASTNode) ast.getChild(0)), true); - break; - case HiveParser.TOK_ALTERTABLE_RENAME: - analyzeAlterTableRename(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_TOUCH: - analyzeAlterTableTouch(ast); - break; - case HiveParser.TOK_ALTERTABLE_ARCHIVE: - analyzeAlterTableArchive(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_UNARCHIVE: - analyzeAlterTableArchive(ast, true); - break; - case HiveParser.TOK_ALTERTABLE_ADDCOLS: - analyzeAlterTableModifyCols(ast, AlterTableTypes.ADDCOLS); - break; - case HiveParser.TOK_ALTERTABLE_REPLACECOLS: - analyzeAlterTableModifyCols(ast, AlterTableTypes.REPLACECOLS); - break; - case HiveParser.TOK_ALTERTABLE_RENAMECOL: - analyzeAlterTableRenameCol(ast); - break; - case HiveParser.TOK_ALTERTABLE_ADDPARTS: - analyzeAlterTableAddParts(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_DROPPARTS: - analyzeAlterTableDropParts(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: - analyzeAlterTablePartColType(ast); - break; - case HiveParser.TOK_ALTERTABLE_PROPERTIES: - analyzeAlterTableProps(ast, false, false); - break; - case HiveParser.TOK_DROPTABLE_PROPERTIES: - analyzeAlterTableProps(ast, false, true); - break; - case HiveParser.TOK_ALTERINDEX_REBUILD: - analyzeAlterIndexRebuild(ast); - break; - case HiveParser.TOK_ALTERINDEX_PROPERTIES: - analyzeAlterIndexProps(ast); - break; - case HiveParser.TOK_SHOWPARTITIONS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowPartitions(ast); - break; - case HiveParser.TOK_SHOW_CREATETABLE: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowCreateTable(ast); - break; - case HiveParser.TOK_SHOWINDEXES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowIndexes(ast); - break; - case HiveParser.TOK_LOCKTABLE: - analyzeLockTable(ast); - break; - case HiveParser.TOK_UNLOCKTABLE: - analyzeUnlockTable(ast); - break; - case HiveParser.TOK_LOCKDB: - analyzeLockDatabase(ast); - break; - case HiveParser.TOK_UNLOCKDB: - analyzeUnlockDatabase(ast); - break; - case HiveParser.TOK_CREATEDATABASE: - analyzeCreateDatabase(ast); - break; - case HiveParser.TOK_DROPDATABASE: - analyzeDropDatabase(ast); - break; - case HiveParser.TOK_SWITCHDATABASE: - analyzeSwitchDatabase(ast); - break; - case 
HiveParser.TOK_ALTERDATABASE_PROPERTIES: - analyzeAlterDatabaseProperties(ast); - break; - case HiveParser.TOK_ALTERDATABASE_OWNER: - analyzeAlterDatabaseOwner(ast); - break; - case HiveParser.TOK_CREATEROLE: - analyzeCreateRole(ast); - break; - case HiveParser.TOK_DROPROLE: - analyzeDropRole(ast); - break; - case HiveParser.TOK_SHOW_ROLE_GRANT: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowRoleGrant(ast); - break; - case HiveParser.TOK_SHOW_ROLE_PRINCIPALS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowRolePrincipals(ast); - break; - case HiveParser.TOK_SHOW_ROLES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowRoles(ast); - break; - case HiveParser.TOK_GRANT_ROLE: - analyzeGrantRevokeRole(true, ast); - break; - case HiveParser.TOK_REVOKE_ROLE: - analyzeGrantRevokeRole(false, ast); - break; - case HiveParser.TOK_GRANT: - analyzeGrant(ast); - break; - case HiveParser.TOK_SHOW_GRANT: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowGrant(ast); - break; - case HiveParser.TOK_REVOKE: - analyzeRevoke(ast); - break; - case HiveParser.TOK_ALTERTABLE_SKEWED: - analyzeAltertableSkewedby(ast); - break; - case HiveParser.TOK_EXCHANGEPARTITION: - analyzeExchangePartition(ast); - break; - case HiveParser.TOK_SHOW_SET_ROLE: - analyzeSetShowRole(ast); - break; - default: - throw new SemanticException("Unsupported command."); - } - if (fetchTask != null && !rootTasks.isEmpty()) { - rootTasks.get(rootTasks.size() - 1).setFetchSource(true); - } - } - - private void analyzeSetShowRole(ASTNode ast) throws SemanticException { - switch (ast.getChildCount()) { - case 0: - ctx.setResFile(ctx.getLocalTmpPath()); - rootTasks.add(hiveAuthorizationTaskFactory.createShowCurrentRoleTask( - getInputs(), getOutputs(), ctx.getResFile())); - setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema())); - break; - case 1: - rootTasks.add(hiveAuthorizationTaskFactory.createSetRoleTask( - BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText()), - getInputs(), getOutputs())); - break; - default: - throw new SemanticException("Internal error. ASTNode expected to have 0 or 1 child. " - + ast.dump()); - } - } - - private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) throws SemanticException { - Task task; - if(grant) { - task = hiveAuthorizationTaskFactory.createGrantRoleTask(ast, getInputs(), getOutputs()); - } else { - task = hiveAuthorizationTaskFactory.createRevokeRoleTask(ast, getInputs(), getOutputs()); - } - if(task != null) { - rootTasks.add(task); - } - } - - private void analyzeShowGrant(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. - createShowGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - setFetchTask(createFetchTask(ShowGrantDesc.getSchema())); - } - } - - private void analyzeGrant(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. - createGrantTask(ast, getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - } - } - - private void analyzeRevoke(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. - createRevokeTask(ast, getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - } - } - - private void analyzeCreateRole(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. 
- createCreateRoleTask(ast, getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - } - } - - private void analyzeDropRole(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. - createDropRoleTask(ast, getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - } - } - - private void analyzeShowRoleGrant(ASTNode ast) throws SemanticException { - Task task = hiveAuthorizationTaskFactory. - createShowRoleGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs()); - if(task != null) { - rootTasks.add(task); - setFetchTask(createFetchTask(RoleDDLDesc.getRoleShowGrantSchema())); - } - } - - private void analyzeShowRolePrincipals(ASTNode ast) throws SemanticException { - Task roleDDLTask = (Task) hiveAuthorizationTaskFactory - .createShowRolePrincipalsTask(ast, ctx.getResFile(), getInputs(), getOutputs()); - - if (roleDDLTask != null) { - rootTasks.add(roleDDLTask); - setFetchTask(createFetchTask(RoleDDLDesc.getShowRolePrincipalsSchema())); - } - } - - private void analyzeShowRoles(ASTNode ast) throws SemanticException { - Task roleDDLTask = (Task) hiveAuthorizationTaskFactory - .createShowRolesTask(ast, ctx.getResFile(), getInputs(), getOutputs()); - - if (roleDDLTask != null) { - rootTasks.add(roleDDLTask); - setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema())); - } - } - - private void analyzeAlterDatabaseProperties(ASTNode ast) throws SemanticException { - - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - Map dbProps = null; - - for (int i = 1; i < ast.getChildCount(); i++) { - ASTNode childNode = (ASTNode) ast.getChild(i); - switch (childNode.getToken().getType()) { - case HiveParser.TOK_DATABASEPROPERTIES: - dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0)); - break; - default: - throw new SemanticException("Unrecognized token in CREATE DATABASE statement"); - } - } - AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, dbProps); - addAlterDbDesc(alterDesc); - } - - private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException { - Database database = getDatabase(alterDesc.getDatabaseName()); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc), conf)); - } - - private void analyzeAlterDatabaseOwner(ASTNode ast) throws SemanticException { - String dbName = getUnescapedName((ASTNode) ast.getChild(0)); - PrincipalDesc principalDesc = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast - .getChild(1)); - - // The syntax should not allow these fields to be null, but lets verify - String nullCmdMsg = "can't be null in alter database set owner command"; - if(principalDesc.getName() == null){ - throw new SemanticException("Owner name " + nullCmdMsg); - } - if(principalDesc.getType() == null){ - throw new SemanticException("Owner type " + nullCmdMsg); - } - - AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, principalDesc); - addAlterDbDesc(alterDesc); - } - - private void analyzeExchangePartition(ASTNode ast) throws SemanticException { - Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(0))); - Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(2))); - - // Get the partition specs - Map partSpecs = getPartSpec((ASTNode) ast.getChild(1)); - validatePartitionValues(partSpecs); - boolean sameColumns = MetaStoreUtils.compareFieldColumns( - destTable.getAllCols(), sourceTable.getAllCols()); - boolean samePartitions 
= MetaStoreUtils.compareFieldColumns( - destTable.getPartitionKeys(), sourceTable.getPartitionKeys()); - if (!sameColumns || !samePartitions) { - throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg()); - } - // check if source partition exists - getPartitions(sourceTable, partSpecs, true); - - // Verify that the partitions specified are continuous - // If a subpartition value is specified without specifying a partition's value - // then we throw an exception - int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs); - if (counter < 0) { - throw new SemanticException( - ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString())); - } - List destPartitions = null; - try { - destPartitions = getPartitions(destTable, partSpecs, true); - } catch (SemanticException ex) { - // We should expect a semantic exception being throw as this partition - // should not be present. - } - if (destPartitions != null) { - // If any destination partition is present then throw a Semantic Exception. - throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString())); - } - AlterTableExchangePartition alterTableExchangePartition = - new AlterTableExchangePartition(sourceTable, destTable, partSpecs); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTableExchangePartition), conf)); - } - - /** - * @param partitionKeys the list of partition keys of the table - * @param partSpecs the partition specs given by the user - * @return >=0 if no subpartition value is specified without a partition's - * value being specified else it returns -1 - */ - private int isPartitionValueContinuous(List partitionKeys, - Map partSpecs) { - int counter = 0; - for (FieldSchema partitionKey : partitionKeys) { - if (partSpecs.containsKey(partitionKey.getName())) { - counter++; - continue; - } - return partSpecs.size() == counter ? 
counter : -1; - } - return counter; - } - - private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - boolean ifNotExists = false; - String dbComment = null; - String dbLocation = null; - Map dbProps = null; - - for (int i = 1; i < ast.getChildCount(); i++) { - ASTNode childNode = (ASTNode) ast.getChild(i); - switch (childNode.getToken().getType()) { - case HiveParser.TOK_IFNOTEXISTS: - ifNotExists = true; - break; - case HiveParser.TOK_DATABASECOMMENT: - dbComment = unescapeSQLString(childNode.getChild(0).getText()); - break; - case TOK_DATABASEPROPERTIES: - dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0)); - break; - case TOK_DATABASELOCATION: - dbLocation = unescapeSQLString(childNode.getChild(0).getText()); - addLocationToOutputs(dbLocation); - break; - default: - throw new SemanticException("Unrecognized token in CREATE DATABASE statement"); - } - } - - CreateDatabaseDesc createDatabaseDesc = - new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists); - if (dbProps != null) { - createDatabaseDesc.setDatabaseProperties(dbProps); - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - createDatabaseDesc), conf)); - } - - private void analyzeDropDatabase(ASTNode ast) throws SemanticException { - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - boolean ifExists = false; - boolean ifCascade = false; - - if (null != ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS)) { - ifExists = true; - } - - if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { - ifCascade = true; - } - - Database database = getDatabase(dbName, !ifExists); - if (database == null) { - return; - } - - // if cascade=true, then we need to authorize the drop table action as well - if (ifCascade) { - // add the tables as well to outputs - List tableNames; - // get names of all tables under this dbName - try { - tableNames = db.getAllTables(dbName); - } catch (HiveException e) { - throw new SemanticException(e); - } - // add tables to outputs - if (tableNames != null) { - for (String tableName : tableNames) { - Table table = getTable(dbName, tableName, true); - // We want no lock here, as the database lock will cover the tables, - // and putting a lock will actually cause us to deadlock on ourselves. 
- outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } - } - inputs.add(new ReadEntity(database)); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE)); - - DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf)); - } - - private void analyzeSwitchDatabase(ASTNode ast) { - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - switchDatabaseDesc), conf)); - } - - - - private void analyzeDropTable(ASTNode ast, boolean expectView) - throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); - // we want to signal an error if the table/view doesn't exist and we're - // configured not to fail silently - boolean throwException = - !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); - Table tab = getTable(tableName, throwException); - if (tab != null) { - inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - - DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - dropTblDesc), conf)); - } - - private void analyzeTruncateTable(ASTNode ast) throws SemanticException { - ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION - String tableName = getUnescapedName((ASTNode) root.getChild(0)); - - Table table = getTable(tableName, true); - if (table.getTableType() != TableType.MANAGED_TABLE) { - throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName)); - } - if (table.isNonNative()) { - throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO - } - if (!table.isPartitioned() && root.getChildCount() > 1) { - throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName)); - } - Map partSpec = getPartSpec((ASTNode) root.getChild(1)); - if (partSpec == null) { - if (!table.isPartitioned()) { - outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } else { - for (Partition partition : getPartitions(table, null, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - } - } else { - if (isFullSpec(table, partSpec)) { - Partition partition = getPartition(table, partSpec, true); - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } else { - for (Partition partition : getPartitions(table, partSpec, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - } - } - - TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec); - - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc); - Task truncateTask = TaskFactory.get(ddlWork, conf); - - // Is this a truncate column command - List columnNames = null; - if (ast.getChildCount() == 2) { - try { - columnNames = getColumnNames((ASTNode)ast.getChild(1)); - - // Throw an error if the table is indexed - List indexes = db.getIndexes(table.getDbName(), tableName, (short)1); - if (indexes != null && indexes.size() > 0) { - throw new 
SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); - } - - List bucketCols = null; - Class inputFormatClass = null; - boolean isArchived = false; - Path newTblPartLoc = null; - Path oldTblPartLoc = null; - List cols = null; - ListBucketingCtx lbCtx = null; - boolean isListBucketed = false; - List listBucketColNames = null; - - if (table.isPartitioned()) { - Partition part = db.getPartition(table, partSpec, false); - - Path tabPath = table.getPath(); - Path partPath = part.getDataLocation(); - - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. - newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); - - oldTblPartLoc = partPath; - - cols = part.getCols(); - bucketCols = part.getBucketCols(); - inputFormatClass = part.getInputFormatClass(); - isArchived = ArchiveUtils.isArchived(part); - lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); - isListBucketed = part.isStoredAsSubDirectories(); - listBucketColNames = part.getSkewedColNames(); - } else { - // input and output are the same - oldTblPartLoc = table.getPath(); - newTblPartLoc = table.getPath(); - cols = table.getCols(); - bucketCols = table.getBucketCols(); - inputFormatClass = table.getInputFormatClass(); - lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), - table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf); - isListBucketed = table.isStoredAsSubDirectories(); - listBucketColNames = table.getSkewedColNames(); - } - - // throw a HiveException for non-rcfile. - if (!inputFormatClass.equals(RCFileInputFormat.class)) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg()); - } - - // throw a HiveException if the table/partition is archived - if (isArchived) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg()); - } - - Set columnIndexes = new HashSet(); - for (String columnName : columnNames) { - boolean found = false; - for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) { - if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) { - columnIndexes.add(columnIndex); - found = true; - break; - } - } - // Throw an exception if the user is trying to truncate a column which doesn't exist - if (!found) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName)); - } - // Throw an exception if the table/partition is bucketed on one of the columns - for (String bucketCol : bucketCols) { - if (bucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName)); - } - } - if (isListBucketed) { - for (String listBucketCol : listBucketColNames) { - if (listBucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException( - ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName)); - } - } - } - } - - truncateTblDesc.setColumnIndexes(new ArrayList(columnIndexes)); - - truncateTblDesc.setInputDir(oldTblPartLoc); - addInputsOutputsAlterTable(tableName, partSpec); - - truncateTblDesc.setLbCtx(lbCtx); - - addInputsOutputsAlterTable(tableName, partSpec); - ddlWork.setNeedLock(true); - TableDesc tblDesc = Utilities.getTableDesc(table); - // Write the output to temporary directory and move it to the final location at the end - // so the operation is atomic. 
- Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri()); - truncateTblDesc.setOutputDir(queryTmpdir); - LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap() : partSpec); - ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); - truncateTask.addDependentTask(moveTsk); - - // Recalculate the HDFS stats if auto gather stats is set - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsWork statDesc; - if (oldTblPartLoc.equals(newTblPartLoc)) { - // If we're merging to the same location, we can avoid some metastore calls - tableSpec tablepart = new tableSpec(this.db, conf, root); - statDesc = new StatsWork(tablepart); - } else { - statDesc = new StatsWork(ltd); - } - statDesc.setNoStatsAggregator(true); - statDesc.setClearAggregatorStats(true); - statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); - Task statTask = TaskFactory.get(statDesc, conf); - moveTsk.addDependentTask(statTask); - } - } catch (HiveException e) { - throw new SemanticException(e); - } - } - - rootTasks.add(truncateTask); - } - - private boolean isFullSpec(Table table, Map partSpec) { - for (FieldSchema partCol : table.getPartCols()) { - if (partSpec.get(partCol.getName()) == null) { - return false; - } - } - return true; - } - - private void analyzeCreateIndex(ASTNode ast) throws SemanticException { - String indexName = unescapeIdentifier(ast.getChild(0).getText()); - String typeName = unescapeSQLString(ast.getChild(1).getText()); - String tableName = getUnescapedName((ASTNode) ast.getChild(2)); - List indexedCols = getColumnNames((ASTNode) ast.getChild(3)); - - IndexType indexType = HiveIndex.getIndexType(typeName); - if (indexType != null) { - typeName = indexType.getHandlerClsName(); - } else { - try { - Class.forName(typeName); - } catch (Exception e) { - throw new SemanticException("class name provided for index handler not found.", e); - } - } - - String indexTableName = null; - boolean deferredRebuild = false; - String location = null; - Map tblProps = null; - Map idxProps = null; - String indexComment = null; - - RowFormatParams rowFormatParams = new RowFormatParams(); - StorageFormat storageFormat = new StorageFormat(conf); - - for (int idx = 4; idx < ast.getChildCount(); idx++) { - ASTNode child = (ASTNode) ast.getChild(idx); - if (storageFormat.fillStorageFormat(child)) { - continue; - } - switch (child.getToken().getType()) { - case HiveParser.TOK_TABLEROWFORMAT: - rowFormatParams.analyzeRowFormat(child); - break; - case HiveParser.TOK_CREATEINDEX_INDEXTBLNAME: - ASTNode ch = (ASTNode) child.getChild(0); - indexTableName = getUnescapedName(ch); - break; - case HiveParser.TOK_DEFERRED_REBUILDINDEX: - deferredRebuild = true; - break; - case HiveParser.TOK_TABLELOCATION: - location = unescapeSQLString(child.getChild(0).getText()); - addLocationToOutputs(location); - break; - case HiveParser.TOK_TABLEPROPERTIES: - tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0)); - break; - case HiveParser.TOK_INDEXPROPERTIES: - idxProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0)); - break; - case HiveParser.TOK_TABLESERIALIZER: - child = (ASTNode) child.getChild(0); - storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText())); - if (child.getChildCount() == 2) { - readProps((ASTNode) (child.getChild(1).getChild(0)), - storageFormat.getSerdeProps()); - } - break; - case HiveParser.TOK_INDEXCOMMENT: - child = (ASTNode) 
child.getChild(0); - indexComment = unescapeSQLString(child.getText()); - } - } - - storageFormat.fillDefaultStorageFormat(); - - - CreateIndexDesc crtIndexDesc = new CreateIndexDesc(tableName, indexName, - indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), - storageFormat.getStorageHandler(), typeName, location, idxProps, tblProps, - storageFormat.getSerde(), storageFormat.getSerdeProps(), rowFormatParams.collItemDelim, - rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, - rowFormatParams.lineDelim, rowFormatParams.mapKeyDelim, indexComment); - Task createIndex = - TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtIndexDesc), conf); - rootTasks.add(createIndex); - } - - private void analyzeDropIndex(ASTNode ast) throws SemanticException { - String indexName = unescapeIdentifier(ast.getChild(0).getText()); - String tableName = getUnescapedName((ASTNode) ast.getChild(1)); - boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); - // we want to signal an error if the index doesn't exist and we're - // configured not to ignore this - boolean throwException = - !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); - if (throwException) { - try { - Index idx = db.getIndex(tableName, indexName); - } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName)); - } - } - - DropIndexDesc dropIdxDesc = new DropIndexDesc(indexName, tableName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - dropIdxDesc), conf)); - } - - private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { - String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); - String indexName = unescapeIdentifier(ast.getChild(1).getText()); - HashMap partSpec = null; - Tree part = ast.getChild(2); - if (part != null) { - partSpec = extractPartitionSpecs(part); - } - List> indexBuilder = getIndexBuilderMapRed(baseTableName, indexName, partSpec); - rootTasks.addAll(indexBuilder); - - // Handle updating index timestamps - AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP); - alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(baseTableName); - alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); - alterIdxDesc.setSpec(partSpec); - - Task tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf); - for (Task t : indexBuilder) { - t.addDependentTask(tsTask); - } - } - - private void analyzeAlterIndexProps(ASTNode ast) - throws SemanticException { - - String baseTableName = getUnescapedName((ASTNode) ast.getChild(0)); - String indexName = unescapeIdentifier(ast.getChild(1).getText()); - HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) - .getChild(0)); - - AlterIndexDesc alterIdxDesc = - new AlterIndexDesc(AlterIndexTypes.ADDPROPS); - alterIdxDesc.setProps(mapProp); - alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(baseTableName); - alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); - - rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf)); - } - - private List> getIndexBuilderMapRed(String baseTableName, String indexName, - HashMap partSpec) throws SemanticException { - try { - String dbName = SessionState.get().getCurrentDatabase(); - Index index = db.getIndex(dbName, baseTableName, indexName); - Table indexTbl = getTable(index.getIndexTableName()); - String baseTblName = index.getOrigTableName(); - Table baseTbl = 
getTable(baseTblName); - - String handlerCls = index.getIndexHandlerClass(); - HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls); - - List indexTblPartitions = null; - List baseTblPartitions = null; - if (indexTbl != null) { - indexTblPartitions = new ArrayList(); - baseTblPartitions = preparePartitions(baseTbl, partSpec, - indexTbl, db, indexTblPartitions); - } - - List> ret = handler.generateIndexBuildTaskList(baseTbl, - index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs()); - return ret; - } catch (Exception e) { - throw new SemanticException(e); - } - } - - private List preparePartitions( - org.apache.hadoop.hive.ql.metadata.Table baseTbl, - HashMap partSpec, - org.apache.hadoop.hive.ql.metadata.Table indexTbl, Hive db, - List indexTblPartitions) - throws HiveException, MetaException { - List baseTblPartitions = new ArrayList(); - if (partSpec != null) { - // if partspec is specified, then only producing index for that - // partition - Partition part = db.getPartition(baseTbl, partSpec, false); - if (part == null) { - throw new HiveException("Partition " - + Warehouse.makePartName(partSpec, false) - + " does not exist in table " - + baseTbl.getTableName()); - } - baseTblPartitions.add(part); - Partition indexPart = db.getPartition(indexTbl, partSpec, false); - if (indexPart == null) { - indexPart = db.createPartition(indexTbl, partSpec); - } - indexTblPartitions.add(indexPart); - } else if (baseTbl.isPartitioned()) { - // if no partition is specified, create indexes for all partitions one - // by one. - baseTblPartitions = db.getPartitions(baseTbl); - for (Partition basePart : baseTblPartitions) { - HashMap pSpec = basePart.getSpec(); - Partition indexPart = db.getPartition(indexTbl, pSpec, false); - if (indexPart == null) { - indexPart = db.createPartition(indexTbl, pSpec); - } - indexTblPartitions.add(indexPart); - } - } - return baseTblPartitions; - } - - private void validateAlterTableType(Table tbl, AlterTableTypes op) throws SemanticException { - validateAlterTableType(tbl, op, false); - } - - private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView) - throws SemanticException { - if (tbl.isView()) { - if (!expectView) { - throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg()); - } - - switch (op) { - case ADDPARTITION: - case DROPPARTITION: - case RENAMEPARTITION: - case ADDPROPS: - case DROPPROPS: - case RENAME: - // allow this form - break; - default: - throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString())); - } - } else { - if (expectView) { - throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg()); - } - } - if (tbl.isNonNative()) { - throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName())); - } - } - - private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isUnset) - throws SemanticException { - - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) - .getChild(0)); - AlterTableDesc alterTblDesc = null; - if (isUnset == true) { - alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, expectView); - if (ast.getChild(2) != null) { - alterTblDesc.setDropIfExists(true); - } - } else { - alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, expectView); - } - alterTblDesc.setProps(mapProp); - alterTblDesc.setOldName(tableName); - - addInputsOutputsAlterTable(tableName, null, alterTblDesc); - - 
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, - HashMap partSpec) - throws SemanticException { - HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) - .getChild(0)); - AlterTableDesc alterTblDesc = new AlterTableDesc( - AlterTableTypes.ADDSERDEPROPS); - alterTblDesc.setProps(mapProp); - alterTblDesc.setOldName(tableName); - alterTblDesc.setPartSpec(partSpec); - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableSerde(ASTNode ast, String tableName, - HashMap partSpec) - throws SemanticException { - - String serdeName = unescapeSQLString(ast.getChild(0).getText()); - AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDSERDE); - if (ast.getChildCount() > 1) { - HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) - .getChild(0)); - alterTblDesc.setProps(mapProp); - } - alterTblDesc.setOldName(tableName); - alterTblDesc.setSerdeName(serdeName); - alterTblDesc.setPartSpec(partSpec); - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, - HashMap partSpec) - throws SemanticException { - - StorageFormat format = new StorageFormat(conf); - ASTNode child = (ASTNode) ast.getChild(0); - - if (!format.fillStorageFormat(child)) { - throw new AssertionError("Unknown token " + child.getText()); - } - - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, format.getInputFormat(), - format.getOutputFormat(), format.getSerde(), format.getStorageHandler(), partSpec); - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void addInputsOutputsAlterTable(String tableName, Map partSpec) - throws SemanticException { - addInputsOutputsAlterTable(tableName, partSpec, null); - } - - private void addInputsOutputsAlterTable(String tableName, Map partSpec, - AlterTableDesc desc) throws SemanticException { - Table tab = getTable(tableName, true); - // Determine the lock type to acquire - WriteEntity.WriteType writeType = desc == null ? WriteEntity.WriteType.DDL_EXCLUSIVE : - WriteEntity.determineAlterTableWriteType(desc.getOp()); - if (partSpec == null || partSpec.isEmpty()) { - inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, writeType)); - } - else { - ReadEntity re = new ReadEntity(tab); - // In the case of altering a table for its partitions we don't need to lock the table - // itself, just the partitions. But the table will have a ReadEntity. So mark that - // ReadEntity as no lock. 
- re.noLockNeeded(); - inputs.add(re); - if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { - Partition part = getPartition(tab, partSpec, true); - outputs.add(new WriteEntity(part, writeType)); - } - else { - for (Partition part : getPartitions(tab, partSpec, true)) { - outputs.add(new WriteEntity(part, writeType)); - } - } - } - - if (desc != null) { - validateAlterTableType(tab, desc.getOp(), desc.getExpectView()); - - // validate Unset Non Existed Table Properties - if (desc.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS && - desc.getIsDropIfExists() == false) { - Iterator keyItr = desc.getProps().keySet().iterator(); - while (keyItr.hasNext()) { - String currKey = keyItr.next(); - if (tab.getTTable().getParameters().containsKey(currKey) == false) { - String errorMsg = - "The following property " + currKey + - " does not exist in " + tab.getTableName(); - throw new SemanticException( - ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg)); - } - } - } - } - } - - private void analyzeAlterTableLocation(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { - - String newLocation = unescapeSQLString(ast.getChild(0).getText()); - addLocationToOutputs(newLocation); - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, newLocation, partSpec); - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - - } - - private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, - HashMap partSpec) - throws SemanticException { - - AlterTableDesc alterTblDesc = - new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE); - - alterTblDesc.setOldName(tableName); - alterTblDesc.setPartSpec(partSpec); - - ASTNode child = (ASTNode) ast.getChild(0); - - switch (child.getToken().getType()) { - case HiveParser.TOK_ENABLE: - alterTblDesc.setProtectModeEnable(true); - break; - case HiveParser.TOK_DISABLE: - alterTblDesc.setProtectModeEnable(false); - break; - default: - throw new SemanticException( - "Set Protect mode Syntax parsing error."); - } - - ASTNode grandChild = (ASTNode) child.getChild(0); - switch (grandChild.getToken().getType()) { - case HiveParser.TOK_OFFLINE: - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE); - break; - case HiveParser.TOK_NO_DROP: - if (grandChild.getChildCount() > 0) { - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE); - } - else { - alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP); - } - break; - case HiveParser.TOK_READONLY: - throw new SemanticException( - "Potect mode READONLY is not implemented"); - default: - throw new SemanticException( - "Only protect mode NO_DROP or OFFLINE supported"); - } - - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, - String tableName, HashMap partSpec) - throws SemanticException { - AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc( - tableName, partSpec); - - List inputDir = new ArrayList(); - Path oldTblPartLoc = null; - Path newTblPartLoc = null; - Table tblObj = null; - ListBucketingCtx lbCtx = null; - - try { - tblObj = getTable(tableName); - - List bucketCols = null; - Class inputFormatClass = null; - boolean isArchived = false; - boolean checkIndex = 
HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX); - if (checkIndex) { - List indexes = db.getIndexes(tblObj.getDbName(), tableName, - Short.MAX_VALUE); - if (indexes != null && indexes.size() > 0) { - throw new SemanticException("can not do merge because source table " - + tableName + " is indexed."); - } - } - - if (tblObj.isPartitioned()) { - if (partSpec == null) { - throw new SemanticException("source table " + tableName - + " is partitioned but no partition desc found."); - } else { - Partition part = getPartition(tblObj, partSpec, false); - if (part == null) { - throw new SemanticException("source table " + tableName - + " is partitioned but partition not found."); - } - bucketCols = part.getBucketCols(); - inputFormatClass = part.getInputFormatClass(); - isArchived = ArchiveUtils.isArchived(part); - - Path tabPath = tblObj.getPath(); - Path partPath = part.getDataLocation(); - - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. - newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); - - oldTblPartLoc = partPath; - - lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); - } - } else { - inputFormatClass = tblObj.getInputFormatClass(); - bucketCols = tblObj.getBucketCols(); - - // input and output are the same - oldTblPartLoc = tblObj.getPath(); - newTblPartLoc = tblObj.getPath(); - - lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), tblObj.getSkewedColValues(), - tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories(), conf); - } - - // throw a HiveException for non-rcfile. - if (!inputFormatClass.equals(RCFileInputFormat.class)) { - throw new SemanticException( - "Only RCFileFormat is supportted right now."); - } - - // throw a HiveException if the table/partition is bucketized - if (bucketCols != null && bucketCols.size() > 0) { - throw new SemanticException( - "Merge can not perform on bucketized partition/table."); - } - - // throw a HiveException if the table/partition is archived - if (isArchived) { - throw new SemanticException( - "Merge can not perform on archived partitions."); - } - - inputDir.add(oldTblPartLoc); - - mergeDesc.setInputDir(inputDir); - - mergeDesc.setLbCtx(lbCtx); - - addInputsOutputsAlterTable(tableName, partSpec); - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc); - ddlWork.setNeedLock(true); - Task mergeTask = TaskFactory.get(ddlWork, conf); - TableDesc tblDesc = Utilities.getTableDesc(tblObj); - Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri()); - mergeDesc.setOutputDir(queryTmpdir); - LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? 
new HashMap() : partSpec); - ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); - mergeTask.addDependentTask(moveTsk); - - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsWork statDesc; - if (oldTblPartLoc.equals(newTblPartLoc)) { - // If we're merging to the same location, we can avoid some metastore calls - tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST); - statDesc = new StatsWork(tablepart); - } else { - statDesc = new StatsWork(ltd); - } - statDesc.setNoStatsAggregator(true); - statDesc.setClearAggregatorStats(true); - statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); - Task statTask = TaskFactory.get(statDesc, conf); - moveTsk.addDependentTask(statTask); - } - - rootTasks.add(mergeTask); - } catch (Exception e) { - throw new SemanticException(e); - } - } - - private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { - addInputsOutputsAlterTable(tableName, partSpec); - - AlterTableDesc alterTblDesc; - switch (ast.getChild(0).getType()) { - case HiveParser.TOK_NOT_CLUSTERED: - alterTblDesc = new AlterTableDesc(tableName, -1, new ArrayList(), - new ArrayList(), partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); - break; - case HiveParser.TOK_NOT_SORTED: - alterTblDesc = new AlterTableDesc(tableName, true, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); - break; - case HiveParser.TOK_TABLEBUCKETS: - ASTNode buckets = (ASTNode) ast.getChild(0); - List bucketCols = getColumnNames((ASTNode) buckets.getChild(0)); - List sortCols = new ArrayList(); - int numBuckets = -1; - if (buckets.getChildCount() == 2) { - numBuckets = (Integer.valueOf(buckets.getChild(1).getText())).intValue(); - } else { - sortCols = getColumnNamesOrder((ASTNode) buckets.getChild(1)); - numBuckets = (Integer.valueOf(buckets.getChild(2).getText())).intValue(); - } - if (numBuckets <= 0) { - throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg()); - } - - alterTblDesc = new AlterTableDesc(tableName, numBuckets, - bucketCols, sortCols, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - break; - } - } - - private void analyzeAlterTableCompact(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { - - String type = unescapeSQLString(ast.getChild(0).getText()).toLowerCase(); - - if (!type.equals("minor") && !type.equals("major")) { - throw new SemanticException(ErrorMsg.INVALID_COMPACTION_TYPE.getMsg()); - } - - LinkedHashMap newPartSpec = null; - if (partSpec != null) newPartSpec = new LinkedHashMap(partSpec); - - AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get().getCurrentDatabase(), - tableName, newPartSpec, type); - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); - } - - static HashMap getProps(ASTNode prop) { - HashMap mapProp = new HashMap(); - readProps(prop, mapProp); - return mapProp; - } - - /** - * Utility class to resolve QualifiedName - */ - static class QualifiedNameUtil { - - // delimiter to check DOT delimited qualified names - static String delimiter = "\\."; - - /** - * Get the fully qualified name in the ast. e.g. 
the ast of the form ^(DOT - * ^(DOT a b) c) will generate a name of the form a.b.c - * - * @param ast - * The AST from which the qualified name has to be extracted - * @return String - */ - static public String getFullyQualifiedName(ASTNode ast) { - if (ast.getChildCount() == 0) { - return ast.getText(); - } else if (ast.getChildCount() == 2) { - return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(1)); - } else if (ast.getChildCount() == 3) { - return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(1)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(2)); - } else { - return null; - } - } - - // assume the first component of DOT delimited name is tableName - // get the attemptTableName - static public String getAttemptTableName(Hive db, String qualifiedName, boolean isColumn) { - // check whether the name starts with table - // DESCRIBE table - // DESCRIBE table.column - // DECRIBE table column - String tableName = qualifiedName.substring(0, - qualifiedName.indexOf('.') == -1 ? - qualifiedName.length() : qualifiedName.indexOf('.')); - try { - Table tab = db.getTable(tableName); - if (tab != null) { - if (isColumn) { - // if attempt to get columnPath - // return the whole qualifiedName(table.column or table) - return qualifiedName; - } else { - // if attempt to get tableName - // return table - return tableName; - } - } - } catch (HiveException e) { - // assume the first DOT delimited component is tableName - // OK if it is not - // do nothing when having exception - return null; - } - return null; - } - - // get Database Name - static public String getDBName(Hive db, ASTNode ast) { - String dbName = null; - String fullyQualifiedName = getFullyQualifiedName(ast); - - // if database.table or database.table.column or table.column - // first try the first component of the DOT separated name - if (ast.getChildCount() >= 2) { - dbName = fullyQualifiedName.substring(0, - fullyQualifiedName.indexOf('.') == -1 ? 
- fullyQualifiedName.length() : - fullyQualifiedName.indexOf('.')); - try { - // if the database name is not valid - // it is table.column - // return null as dbName - if (!db.databaseExists(dbName)) { - return null; - } - } catch (HiveException e) { - return null; - } - } else { - // in other cases, return null - // database is not validated if null - return null; - } - return dbName; - } - - // get Table Name - static public String getTableName(Hive db, ASTNode ast) - throws SemanticException { - String tableName = null; - String fullyQualifiedName = getFullyQualifiedName(ast); - - // assume the first component of DOT delimited name is tableName - String attemptTableName = getAttemptTableName(db, fullyQualifiedName, false); - if (attemptTableName != null) { - return attemptTableName; - } - - // if the name does not start with table - // it should start with database - // DESCRIBE database.table - // DESCRIBE database.table column - if (fullyQualifiedName.split(delimiter).length == 3) { - // if DESCRIBE database.table.column - // invalid syntax exception - if (ast.getChildCount() == 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_OR_COLUMN.getMsg(fullyQualifiedName)); - } else { - // if DESCRIBE database.table column - // return database.table as tableName - tableName = fullyQualifiedName.substring(0, - fullyQualifiedName.lastIndexOf('.')); - } - } else if (fullyQualifiedName.split(delimiter).length == 2) { - // if DESCRIBE database.table - // return database.table as tableName - tableName = fullyQualifiedName; - } else { - // if fullyQualifiedName only have one component - // it is an invalid table - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(fullyQualifiedName)); - } - - return tableName; - } - - // get column path - static public String getColPath( - Hive db, - ASTNode parentAst, - ASTNode ast, - String tableName, - Map partSpec) { - - // if parent has two children - // it could be DESCRIBE table key - // or DESCRIBE table partition - if (parentAst.getChildCount() == 2 && partSpec == null) { - // if partitionSpec is null - // it is DESCRIBE table key - // return table as columnPath - return getFullyQualifiedName(parentAst); - } - - // assume the first component of DOT delimited name is tableName - String attemptTableName = getAttemptTableName(db, tableName, true); - if (attemptTableName != null) { - return attemptTableName; - } - - // if the name does not start with table - // it should start with database - // DESCRIBE database.table - // DESCRIBE database.table column - if (tableName.split(delimiter).length == 3) { - // if DESCRIBE database.table column - // return table.column as column path - return tableName.substring( - tableName.indexOf(".") + 1, tableName.length()); - } - - // in other cases, column path is the same as tableName - return tableName; - } - - // get partition metadata - static public Map getPartitionSpec(Hive db, ASTNode ast, String tableName) - throws SemanticException { - // if ast has two children - // it could be DESCRIBE table key - // or DESCRIBE table partition - // check whether it is DESCRIBE table partition - if (ast.getChildCount() == 2) { - ASTNode partNode = (ASTNode) ast.getChild(1); - HashMap partSpec = null; - try { - partSpec = getPartSpec(partNode); - } catch (SemanticException e) { - // get exception in resolving partition - // it could be DESCRIBE table key - // return null - // continue processing for DESCRIBE table key - return null; - } - - Table tab = null; - try { - tab = db.getTable(tableName); - } catch 
(HiveException e) { - // if table not valid - // throw semantic exception - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); - } - - if (partSpec != null) { - Partition part = null; - try { - part = db.getPartition(tab, partSpec, false); - } catch (HiveException e) { - // if get exception in finding partition - // it could be DESCRIBE table key - // return null - // continue processing for DESCRIBE table key - return null; - } - - // if partition is not found - // it is DESCRIBE table partition - // invalid partition exception - if (part == null) { - throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.toString())); - } - - // it is DESCRIBE table partition - // return partition metadata - return partSpec; - } - } - - return null; - } - - } - - /** - * Create a FetchTask for a given thrift ddl schema. - * - * @param schema - * thrift ddl - */ - private FetchTask createFetchTask(String schema) { - Properties prop = new Properties(); - - prop.setProperty(serdeConstants.SERIALIZATION_FORMAT, "9"); - prop.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, " "); - String[] colTypes = schema.split("#"); - prop.setProperty("columns", colTypes[0]); - prop.setProperty("columns.types", colTypes[1]); - prop.setProperty(serdeConstants.SERIALIZATION_LIB, LazySimpleSerDe.class.getName()); - FetchWork fetch = new FetchWork(ctx.getResFile(), new TableDesc( - TextInputFormat.class,IgnoreKeyTextOutputFormat.class, prop), -1); - fetch.setSerializationNullFormat(" "); - return (FetchTask) TaskFactory.get(fetch, conf); - } - - private void validateDatabase(String databaseName) throws SemanticException { - try { - if (!db.databaseExists(databaseName)) { - throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName)); - } - } catch (HiveException e) { - throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName), e); - } - } - - private void validateTable(String tableName, Map partSpec) - throws SemanticException { - Table tab = getTable(tableName); - if (partSpec != null) { - getPartition(tab, partSpec, true); - } - } - - private void analyzeDescribeTable(ASTNode ast) throws SemanticException { - ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); - - String qualifiedName = - QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0)); - String tableName = - QualifiedNameUtil.getTableName(db, (ASTNode)(tableTypeExpr.getChild(0))); - String dbName = - QualifiedNameUtil.getDBName(db, (ASTNode)(tableTypeExpr.getChild(0))); - - Map partSpec = - QualifiedNameUtil.getPartitionSpec(db, tableTypeExpr, tableName); - - String colPath = QualifiedNameUtil.getColPath( - db, tableTypeExpr, (ASTNode) tableTypeExpr.getChild(0), qualifiedName, partSpec); - - // if database is not the one currently using - // validate database - if (dbName != null) { - validateDatabase(dbName); - } - if (partSpec != null) { - validateTable(tableName, partSpec); - } - - DescTableDesc descTblDesc = new DescTableDesc( - ctx.getResFile(), tableName, partSpec, colPath); - - boolean showColStats = false; - if (ast.getChildCount() == 2) { - int descOptions = ast.getChild(1).getType(); - descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); - descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED); - descTblDesc.setPretty(descOptions == HiveParser.KW_PRETTY); - // in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath - // will contain tablename.column_name. 
If column_name is not specified - // colPath will be equal to tableName. This is how we can differentiate - // if we are describing a table or column - if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) { - showColStats = true; - } - } - - inputs.add(new ReadEntity(getTable(tableName))); - Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - descTblDesc), conf); - rootTasks.add(ddlTask); - String schema = DescTableDesc.getSchema(showColStats); - setFetchTask(createFetchTask(schema)); - LOG.info("analyzeDescribeTable done"); - } - - /** - * Describe database. - * - * @param ast - * @throws SemanticException - */ - private void analyzeDescDatabase(ASTNode ast) throws SemanticException { - - boolean isExtended; - String dbName; - - if (ast.getChildCount() == 1) { - dbName = stripQuotes(ast.getChild(0).getText()); - isExtended = false; - } else if (ast.getChildCount() == 2) { - dbName = stripQuotes(ast.getChild(0).getText()); - isExtended = true; - } else { - throw new SemanticException("Unexpected Tokens at DESCRIBE DATABASE"); - } - - DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(), - dbName, isExtended); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descDbDesc), conf)); - setFetchTask(createFetchTask(descDbDesc.getSchema())); - } - - public static HashMap getPartSpec(ASTNode partspec) - throws SemanticException { - if (partspec == null) { - return null; - } - HashMap partSpec = new LinkedHashMap(); - for (int i = 0; i < partspec.getChildCount(); ++i) { - ASTNode partspec_val = (ASTNode) partspec.getChild(i); - String key = partspec_val.getChild(0).getText(); - String val = null; - if (partspec_val.getChildCount() > 1) { - val = stripQuotes(partspec_val.getChild(1).getText()); - } - partSpec.put(key.toLowerCase(), val); - } - return partSpec; - } - - private void analyzeShowPartitions(ASTNode ast) throws SemanticException { - ShowPartitionsDesc showPartsDesc; - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - List> partSpecs = getPartitionSpecs(ast); - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - validateTable(tableName, null); - - showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); - inputs.add(new ReadEntity(getTable(tableName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showPartsDesc), conf)); - setFetchTask(createFetchTask(showPartsDesc.getSchema())); - } - - private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { - ShowCreateTableDesc showCreateTblDesc; - String tableName = getUnescapedName((ASTNode)ast.getChild(0)); - showCreateTblDesc = new ShowCreateTableDesc(tableName, ctx.getResFile().toString()); - - Table tab = getTable(tableName); - if (tab.getTableType() == org.apache.hadoop.hive.metastore.TableType.INDEX_TABLE) { - throw new SemanticException(ErrorMsg.SHOW_CREATETABLE_INDEX.getMsg(tableName - + " has table type INDEX_TABLE")); - } - inputs.add(new ReadEntity(tab)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showCreateTblDesc), conf)); - setFetchTask(createFetchTask(showCreateTblDesc.getSchema())); - } - - private void analyzeShowDatabases(ASTNode ast) throws SemanticException { - ShowDatabasesDesc showDatabasesDesc; - if (ast.getChildCount() == 1) { - String databasePattern = unescapeSQLString(ast.getChild(0).getText()); - showDatabasesDesc = new 
ShowDatabasesDesc(ctx.getResFile(), databasePattern); - } else { - showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile()); - } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showDatabasesDesc), conf)); - setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); - } - - private void analyzeShowTables(ASTNode ast) throws SemanticException { - ShowTablesDesc showTblsDesc; - String dbName = SessionState.get().getCurrentDatabase(); - String tableNames = null; - - if (ast.getChildCount() > 3) { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - - switch (ast.getChildCount()) { - case 1: // Uses a pattern - tableNames = unescapeSQLString(ast.getChild(0).getText()); - showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames); - break; - case 2: // Specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - validateDatabase(dbName); - showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - break; - case 3: // Uses a pattern and specifies a DB - assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); - dbName = unescapeIdentifier(ast.getChild(1).getText()); - tableNames = unescapeSQLString(ast.getChild(2).getText()); - validateDatabase(dbName); - showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames); - break; - default: // No pattern or DB - showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - break; - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblsDesc), conf)); - setFetchTask(createFetchTask(showTblsDesc.getSchema())); - } - - private void analyzeShowColumns(ASTNode ast) throws SemanticException { - ShowColumnsDesc showColumnsDesc; - String dbName = null; - String tableName = null; - switch (ast.getChildCount()) { - case 1: - tableName = getUnescapedName((ASTNode) ast.getChild(0)); - break; - case 2: - dbName = getUnescapedName((ASTNode) ast.getChild(0)); - tableName = getUnescapedName((ASTNode) ast.getChild(1)); - break; - default: - break; - } - - Table tab = getTable(dbName, tableName, true); - inputs.add(new ReadEntity(tab)); - - showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showColumnsDesc), conf)); - setFetchTask(createFetchTask(showColumnsDesc.getSchema())); - } - - private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { - ShowTableStatusDesc showTblStatusDesc; - String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); - String dbName = SessionState.get().getCurrentDatabase(); - int children = ast.getChildCount(); - HashMap partSpec = null; - if (children >= 2) { - if (children > 3) { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - for (int i = 1; i < children; i++) { - ASTNode child = (ASTNode) ast.getChild(i); - if (child.getToken().getType() == HiveParser.Identifier) { - dbName = unescapeIdentifier(child.getText()); - } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) { - partSpec = getPartSpec(child); - } else { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - } - } - - if (partSpec != null) { - validateTable(tableNames, partSpec); - } - - showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, - tableNames, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblStatusDesc), conf)); - 
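Editor's note: the DESCRIBE/SHOW analyzers above lean on QualifiedNameUtil.getFullyQualifiedName to flatten a nested DOT tree into a dotted string before deciding which component is the database, table, or column. A minimal stand-alone sketch of that flattening, using a hypothetical Node class in place of Hive's ASTNode (not the real parser types):

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical stand-in for an ANTLR AST node: a token text plus children.
    final class Node {
        final String text;
        final List<Node> children;
        Node(String text, Node... children) {
            this.text = text;
            this.children = Arrays.asList(children);
        }
    }

    public class QualifiedNameSketch {
        // Mirrors the recursion in getFullyQualifiedName: a leaf returns its own text,
        // a DOT node joins its (two or three) flattened children with ".".
        static String flatten(Node n) {
            if (n.children.isEmpty()) {
                return n.text;
            }
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < n.children.size(); i++) {
                if (i > 0) sb.append('.');
                sb.append(flatten(n.children.get(i)));
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            // ^(DOT ^(DOT a b) c)  ->  "a.b.c"
            Node ast = new Node(".", new Node(".", new Node("a"), new Node("b")), new Node("c"));
            System.out.println(flatten(ast));
        }
    }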
setFetchTask(createFetchTask(showTblStatusDesc.getSchema())); - } - - private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { - ShowTblPropertiesDesc showTblPropertiesDesc; - String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); - String dbName = SessionState.get().getCurrentDatabase(); - String propertyName = null; - if (ast.getChildCount() > 1) { - propertyName = unescapeSQLString(ast.getChild(1).getText()); - } - - validateTable(tableNames, null); - - showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames, - propertyName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblPropertiesDesc), conf)); - setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); - } - - private void analyzeShowIndexes(ASTNode ast) throws SemanticException { - ShowIndexesDesc showIndexesDesc; - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - showIndexesDesc = new ShowIndexesDesc(tableName, ctx.getResFile()); - - if (ast.getChildCount() == 2) { - int descOptions = ast.getChild(1).getType(); - showIndexesDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showIndexesDesc), conf)); - setFetchTask(createFetchTask(showIndexesDesc.getSchema())); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "SHOW FUNCTIONS;". - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsin failed - */ - private void analyzeShowFunctions(ASTNode ast) throws SemanticException { - ShowFunctionsDesc showFuncsDesc; - if (ast.getChildCount() == 1) { - String funcNames = stripQuotes(ast.getChild(0).getText()); - showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile(), funcNames); - } else { - showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile()); - } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showFuncsDesc), conf)); - setFetchTask(createFetchTask(showFuncsDesc.getSchema())); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "SHOW LOCKS;". - * - * @param ast - * The parsed command tree. 
- * @throws SemanticException - * Parsing failed - */ - private void analyzeShowLocks(ASTNode ast) throws SemanticException { - String tableName = null; - HashMap partSpec = null; - boolean isExtended = false; - - if (ast.getChildCount() >= 1) { - // table for which show locks is being executed - for (int i = 0; i < ast.getChildCount(); i++) { - ASTNode child = (ASTNode) ast.getChild(i); - if (child.getType() == HiveParser.TOK_TABTYPE) { - ASTNode tableTypeExpr = child; - tableName = - QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0)); - // get partition metadata if partition specified - if (tableTypeExpr.getChildCount() == 2) { - ASTNode partspec = (ASTNode) tableTypeExpr.getChild(1); - partSpec = getPartSpec(partspec); - } - } else if (child.getType() == HiveParser.KW_EXTENDED) { - isExtended = true; - } - } - } - - HiveTxnManager txnManager = null; - try { - txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage()); - } - - ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, - partSpec, isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showLocksDesc), conf)); - setFetchTask(createFetchTask(showLocksDesc.getSchema())); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "SHOW LOCKS DATABASE database [extended];". - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsing failed - */ - private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { - boolean isExtended = (ast.getChildCount() > 1); - String dbName = stripQuotes(ast.getChild(0).getText()); - - HiveTxnManager txnManager = null; - try { - txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage()); - } - - ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), dbName, - isExtended, txnManager.useNewShowLocksFormat()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showLocksDesc), conf)); - setFetchTask(createFetchTask(showLocksDesc.getSchema())); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - private void analyzeShowConf(ASTNode ast) throws SemanticException { - String confName = stripQuotes(ast.getChild(0).getText()); - ShowConfDesc showConfDesc = new ShowConfDesc(ctx.getResFile(), confName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showConfDesc), conf)); - setFetchTask(createFetchTask(showConfDesc.getSchema())); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "LOCK TABLE ..;". - * - * @param ast - * The parsed command tree. 
- * @throws SemanticException - * Parsing failed - */ - private void analyzeLockTable(ASTNode ast) - throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)).toLowerCase(); - String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); - List> partSpecs = getPartitionSpecs(ast); - - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); - lockTblDesc.setQueryStr(this.ctx.getCmd()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - lockTblDesc), conf)); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - /** - * Add a task to execute "SHOW COMPACTIONS" - * @param ast The parsed command tree. - * @throws SemanticException Parsing failed. - */ - private void analyzeShowCompactions(ASTNode ast) throws SemanticException { - ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); - setFetchTask(createFetchTask(desc.getSchema())); - } - - /** - * Add a task to execute "SHOW COMPACTIONS" - * @param ast The parsed command tree. - * @throws SemanticException Parsing failed. - */ - private void analyzeShowTxns(ASTNode ast) throws SemanticException { - ShowTxnsDesc desc = new ShowTxnsDesc(ctx.getResFile()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); - setFetchTask(createFetchTask(desc.getSchema())); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "UNLOCK TABLE ..;". - * - * @param ast - * The parsed command tree. 
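Editor's note: analyzeShowCompactions and analyzeShowTxns above are the smallest examples of the skeleton most analyzers in this file follow: build a descriptor, wrap it in DDLWork, register a root task, then attach a fetch task that reads the descriptor's result file. A rough stand-alone analogy with made-up stand-in types (these are not Hive's real classes, and the schema string is a placeholder):

    import java.util.ArrayList;
    import java.util.List;

    public class AnalyzerSkeletonSketch {
        // Hypothetical stand-ins for a *Desc, DDLWork, Task and FetchTask.
        record Desc(String resFile, String schema) {}
        record Work(Desc desc) {}
        record Task(Work work) {}
        record FetchTask(String resFile, String schema) {}

        private final List<Task> rootTasks = new ArrayList<>();
        private FetchTask fetchTask;

        // Same four steps as analyzeShowCompactions: desc -> work -> task -> fetch task.
        void analyzeShowSomething(String resFile) {
            Desc desc = new Desc(resFile, "colnames#coltypes");
            rootTasks.add(new Task(new Work(desc)));
            fetchTask = new FetchTask(desc.resFile(), desc.schema());
        }

        public static void main(String[] args) {
            AnalyzerSkeletonSketch a = new AnalyzerSkeletonSketch();
            a.analyzeShowSomething("/tmp/result");
            System.out.println(a.rootTasks.size() + " task(s), fetch schema: " + a.fetchTask.schema());
        }
    }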
- * @throws SemanticException - * Parsing failed - */ - private void analyzeUnlockTable(ASTNode ast) - throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - List> partSpecs = getPartitionSpecs(ast); - - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - unlockTblDesc), conf)); - - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - private void analyzeLockDatabase(ASTNode ast) throws SemanticException { - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); - - //inputs.add(new ReadEntity(dbName)); - //outputs.add(new WriteEntity(dbName)); - LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); - lockDatabaseDesc.setQueryStr(ctx.getCmd()); - DDLWork work = new DDLWork(getInputs(), getOutputs(), lockDatabaseDesc); - rootTasks.add(TaskFactory.get(work, conf)); - ctx.setNeedLockMgr(true); - } - - private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { - String dbName = unescapeIdentifier(ast.getChild(0).getText()); - - UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName); - DDLWork work = new DDLWork(getInputs(), getOutputs(), unlockDatabaseDesc); - rootTasks.add(TaskFactory.get(work, conf)); - // Need to initialize the lock manager - ctx.setNeedLockMgr(true); - } - - /** - * Add the task according to the parsed command tree. This is used for the CLI - * command "DESCRIBE FUNCTION;". - * - * @param ast - * The parsed command tree. 
- * @throws SemanticException - * Parsing failed - */ - private void analyzeDescFunction(ASTNode ast) throws SemanticException { - String funcName; - boolean isExtended; - - if (ast.getChildCount() == 1) { - funcName = stripQuotes(ast.getChild(0).getText()); - isExtended = false; - } else if (ast.getChildCount() == 2) { - funcName = stripQuotes(ast.getChild(0).getText()); - isExtended = true; - } else { - throw new SemanticException("Unexpected Tokens at DESCRIBE FUNCTION"); - } - - DescFunctionDesc descFuncDesc = new DescFunctionDesc(ctx.getResFile(), - funcName, isExtended); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - descFuncDesc), conf)); - setFetchTask(createFetchTask(descFuncDesc.getSchema())); - } - - - private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws SemanticException { - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, - getUnescapedName((ASTNode) ast.getChild(1)), expectView); - - addInputsOutputsAlterTable(tblName, null, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - String newComment = null; - String newType = null; - newType = getTypeStringFromAST((ASTNode) ast.getChild(3)); - boolean first = false; - String flagCol = null; - ASTNode positionNode = null; - if (ast.getChildCount() == 6) { - newComment = unescapeSQLString(ast.getChild(4).getText()); - positionNode = (ASTNode) ast.getChild(5); - } else if (ast.getChildCount() == 5) { - if (ast.getChild(4).getType() == HiveParser.StringLiteral) { - newComment = unescapeSQLString(ast.getChild(4).getText()); - } else { - positionNode = (ASTNode) ast.getChild(4); - } - } - - if (positionNode != null) { - if (positionNode.getChildCount() == 0) { - first = true; - } else { - flagCol = unescapeIdentifier(positionNode.getChild(0).getText()); - } - } - - String oldColName = ast.getChild(1).getText(); - String newColName = ast.getChild(2).getText(); - - /* Validate the operation of renaming a column name. 
*/ - Table tab = getTable(tblName); - - SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo(); - if ((null != skewInfo) - && (null != skewInfo.getSkewedColNames()) - && skewInfo.getSkewedColNames().contains(oldColName)) { - throw new SemanticException(oldColName - + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg()); - } - - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, - unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol); - addInputsOutputsAlterTable(tblName, null, alterTblDesc); - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, - HashMap oldPartSpec) throws SemanticException { - Map newPartSpec = extractPartitionSpecs(ast.getChild(0)); - if (newPartSpec == null) { - throw new SemanticException("RENAME PARTITION Missing Destination" + ast); - } - Table tab = getTable(tblName, true); - validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION); - ReadEntity re = new ReadEntity(tab); - re.noLockNeeded(); - inputs.add(re); - - List> partSpecs = new ArrayList>(); - partSpecs.add(oldPartSpec); - partSpecs.add(newPartSpec); - addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE); - RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( - SessionState.get().getCurrentDatabase(), tblName, oldPartSpec, newPartSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - renamePartitionDesc), conf)); - } - - private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, - HashMap partSpec) throws SemanticException { - Table tab = getTable(tblName, true); - if (tab.getBucketCols() == null || tab.getBucketCols().isEmpty()) { - throw new SemanticException(ErrorMsg.ALTER_BUCKETNUM_NONBUCKETIZED_TBL.getMsg()); - } - validateAlterTableType(tab, AlterTableTypes.ALTERBUCKETNUM); - inputs.add(new ReadEntity(tab)); - - int bucketNum = Integer.parseInt(ast.getChild(0).getText()); - AlterTableDesc alterBucketNum = new AlterTableDesc(tblName, partSpec, bucketNum); - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterBucketNum), conf)); - } - - private void analyzeAlterTableModifyCols(ASTNode ast, - AlterTableTypes alterType) throws SemanticException { - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - List newCols = getColumns((ASTNode) ast.getChild(1)); - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, - alterType); - - addInputsOutputsAlterTable(tblName, null, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) - throws SemanticException { - - boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) - || HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); - // If the drop has to fail on non-existent partitions, we cannot batch expressions. - // That is because we actually have to check each separate expression for existence. - // We could do a small optimization for the case where expr has all columns and all - // operators are equality, if we assume those would always match one partition (which - // may not be true with legacy, non-normalized column values). This is probably a - // popular case but that's kinda hacky. Let's not do it for now. 
- boolean canGroupExprs = ifExists; - - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - Table tab = getTable(tblName, true); - Map> partSpecs = - getFullPartitionSpecs(ast, tab, canGroupExprs); - if (partSpecs.isEmpty()) return; // nothing to do - - validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView); - ReadEntity re = new ReadEntity(tab); - re.noLockNeeded(); - inputs.add(re); - - boolean ignoreProtection = ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null; - addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection); - - DropTableDesc dropTblDesc = - new DropTableDesc(tblName, partSpecs, expectView, ignoreProtection); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); - } - - private void analyzeAlterTablePartColType(ASTNode ast) - throws SemanticException { - // get table name - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); - - Table tab = null; - - // check if table exists. - try { - tab = getTable(tblName, true); - inputs.add(new ReadEntity(tab)); - } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); - } - - // validate the DDL is a valid operation on the table. - validateAlterTableType(tab, AlterTableTypes.ALTERPARTITION, false); - - // Alter table ... partition column ( column newtype) only takes one column at a time. - // It must have a column name followed with type. - ASTNode colAst = (ASTNode) ast.getChild(1); - assert(colAst.getChildCount() == 2); - - FieldSchema newCol = new FieldSchema(); - - // get column name - String name = colAst.getChild(0).getText().toLowerCase(); - newCol.setName(unescapeIdentifier(name)); - - // get column type - ASTNode typeChild = (ASTNode) (colAst.getChild(1)); - newCol.setType(getTypeStringFromAST(typeChild)); - - // check if column is defined or not - boolean fFoundColumn = false; - for( FieldSchema col : tab.getTTable().getPartitionKeys()) { - if (col.getName().compareTo(newCol.getName()) == 0) { - fFoundColumn = true; - } - } - - // raise error if we could not find the column - if (!fFoundColumn) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName())); - } - - AlterTableAlterPartDesc alterTblAlterPartDesc = - new AlterTableAlterPartDesc(SessionState.get().getCurrentDatabase(), tblName, newCol); - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblAlterPartDesc), conf)); - } - - /** - * Add one or more partitions to a table. Useful when the data has been copied - * to the right location by some other process. - * - * @param ast - * The parsed command tree. - * - * @param expectView - * True for ALTER VIEW, false for ALTER TABLE. - * - * @throws SemanticException - * Parsing failed - */ - private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) - throws SemanticException { - - // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); - boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS; - - Table tab = getTable(tblName, true); - boolean isView = tab.isView(); - validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); - - int numCh = ast.getChildCount(); - int start = ifNotExists ? 
2 : 1; - - String currentLocation = null; - Map currentPart = null; - // Parser has done some verification, so the order of tokens doesn't need to be verified here. - AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists); - for (int num = start; num < numCh; num++) { - ASTNode child = (ASTNode) ast.getChild(num); - switch (child.getToken().getType()) { - case HiveParser.TOK_PARTSPEC: - if (currentPart != null) { - addPartitionDesc.addPartition(currentPart, currentLocation); - currentLocation = null; - } - currentPart = getPartSpec(child); - validatePartitionValues(currentPart); // validate reserved values - validatePartSpec(tab, currentPart, child, conf, true); - break; - case HiveParser.TOK_PARTITIONLOCATION: - // if location specified, set in partition - if (isView) { - throw new SemanticException("LOCATION clause illegal for view partition"); - } - currentLocation = unescapeSQLString(child.getChild(0).getText()); - boolean isLocal = false; - try { - // do best effor to determine if this is a local file - String scheme = new URI(currentLocation).getScheme(); - if (scheme != null) { - isLocal = FileUtils.isLocalFile(conf, currentLocation); - } - } catch (URISyntaxException e) { - LOG.warn("Unable to create URI from " + currentLocation, e); - } - inputs.add(new ReadEntity(new Path(currentLocation), isLocal)); - break; - default: - throw new SemanticException("Unknown child: " + child); - } - } - - // add the last one - if (currentPart != null) { - addPartitionDesc.addPartition(currentPart, currentLocation); - } - - if (addPartitionDesc.getPartitionCount() == 0) { - // nothing to do - return; - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf)); - - if (isView) { - // Compile internal query to capture underlying table partition dependencies - StringBuilder cmd = new StringBuilder(); - cmd.append("SELECT * FROM "); - cmd.append(HiveUtils.unparseIdentifier(tblName)); - cmd.append(" WHERE "); - boolean firstOr = true; - for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) { - AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i); - if (firstOr) { - firstOr = false; - } else { - cmd.append(" OR "); - } - boolean firstAnd = true; - cmd.append("("); - for (Map.Entry entry : partitionDesc.getPartSpec().entrySet()) { - if (firstAnd) { - firstAnd = false; - } else { - cmd.append(" AND "); - } - cmd.append(HiveUtils.unparseIdentifier(entry.getKey(), conf)); - cmd.append(" = '"); - cmd.append(HiveUtils.escapeString(entry.getValue())); - cmd.append("'"); - } - cmd.append(")"); - } - Driver driver = new Driver(conf); - int rc = driver.compile(cmd.toString(), false); - if (rc != 0) { - throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg()); - } - inputs.addAll(driver.getPlan().getInputs()); - } - } - - private Partition getPartitionForOutput(Table tab, Map currentPart) - throws SemanticException { - validatePartitionValues(currentPart); - try { - Partition partition = db.getPartition(tab, currentPart, false); - if (partition != null) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.INSERT)); - } - return partition; - } catch (HiveException e) { - LOG.warn("wrong partition spec " + currentPart); - } - return null; - } - - /** - * Rewrite the metadata for one or more partitions in a table. Useful when - * an external process modifies files on HDFS and you want the pre/post - * hooks to be fired for the specified partition. 
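Editor's note: the ALTER VIEW ... ADD PARTITION branch above compiles an internal SELECT whose WHERE clause ORs one (key = 'value' AND ...) group per partition spec, purely so the plan captures the underlying table partitions as inputs. The string-building part in isolation, using plain java.util types instead of AddPartitionDesc and with identifier quoting/escaping omitted:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.StringJoiner;

    public class ViewPartitionPredicateSketch {
        // One OR-group per partition spec, one AND-term per key/value pair.
        static String buildWhere(List<Map<String, String>> partSpecs) {
            StringJoiner or = new StringJoiner(" OR ");
            for (Map<String, String> spec : partSpecs) {
                StringJoiner and = new StringJoiner(" AND ", "(", ")");
                for (Map.Entry<String, String> e : spec.entrySet()) {
                    and.add(e.getKey() + " = '" + e.getValue() + "'");
                }
                or.add(and.toString());
            }
            return or.toString();
        }

        public static void main(String[] args) {
            Map<String, String> p1 = new LinkedHashMap<>();
            p1.put("ds", "2014-01-01");
            p1.put("hr", "00");
            Map<String, String> p2 = new LinkedHashMap<>();
            p2.put("ds", "2014-01-02");
            p2.put("hr", "01");
            // (ds = '2014-01-01' AND hr = '00') OR (ds = '2014-01-02' AND hr = '01')
            System.out.println(buildWhere(List.of(p1, p2)));
        }
    }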
- * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsin failed - */ - private void analyzeAlterTableTouch(CommonTree ast) - throws SemanticException { - - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); - Table tab = getTable(tblName, true); - validateAlterTableType(tab, AlterTableTypes.TOUCH); - inputs.add(new ReadEntity(tab)); - - // partition name to value - List> partSpecs = getPartitionSpecs(ast); - - if (partSpecs.size() == 0) { - AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, null, - AlterTableDesc.AlterTableTypes.TOUCH); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - touchDesc), conf)); - } else { - addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); - for (Map partSpec : partSpecs) { - AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, partSpec, - AlterTableDesc.AlterTableTypes.TOUCH); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - touchDesc), conf)); - } - } - } - - private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) - throws SemanticException { - - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { - throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); - - } - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - // partition name to value - List> partSpecs = getPartitionSpecs(ast); - - Table tab = getTable(tblName, true); - addTablePartsOutputs(tblName, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK); - validateAlterTableType(tab, AlterTableTypes.ARCHIVE); - inputs.add(new ReadEntity(tab)); - - if (partSpecs.size() > 1) { - throw new SemanticException(isUnArchive ? - ErrorMsg.UNARCHIVE_ON_MULI_PARTS.getMsg() : - ErrorMsg.ARCHIVE_ON_MULI_PARTS.getMsg()); - } - if (partSpecs.size() == 0) { - throw new SemanticException(ErrorMsg.ARCHIVE_ON_TABLE.getMsg()); - } - - Map partSpec = partSpecs.get(0); - try { - isValidPrefixSpec(tab, partSpec); - } catch (HiveException e) { - throw new SemanticException(e.getMessage(), e); - } - AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, partSpec, - (isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - archiveDesc), conf)); - - } - - /** - * Verify that the information in the metastore matches up with the data on - * the fs. - * - * @param ast - * Query tree. - * @throws SemanticException - */ - private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { - String tableName = null; - boolean repair = false; - if (ast.getChildCount() > 0) { - repair = ast.getChild(0).getType() == HiveParser.KW_REPAIR; - if (!repair) { - tableName = getUnescapedName((ASTNode) ast.getChild(0)); - } else if (ast.getChildCount() > 1) { - tableName = getUnescapedName((ASTNode) ast.getChild(1)); - } - } - List> specs = getPartitionSpecs(ast); - MsckDesc checkDesc = new MsckDesc(tableName, specs, ctx.getResFile(), - repair); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - checkDesc), conf)); - } - - /** - * Get the partition specs from the tree. - * - * @param ast - * Tree to extract partitions from. - * @return A list of partition name to value mappings. 
- * @throws SemanticException - */ - private List> getPartitionSpecs(CommonTree ast) - throws SemanticException { - List> partSpecs = new ArrayList>(); - int childIndex = 0; - // get partition metadata if partition specified - for (childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { - Tree partspec = ast.getChild(childIndex); - // sanity check - if (partspec.getType() == HiveParser.TOK_PARTSPEC) { - partSpecs.add(getPartSpec((ASTNode) partspec)); - } - } - return partSpecs; - } - - /** - * Get the partition specs from the tree. This stores the full specification - * with the comparator operator into the output list. - * - * @param ast Tree to extract partitions from. - * @param tab Table. - * @param result Map of partitions by prefix length. Most of the time prefix length will - * be the same for all partition specs, so we can just OR the expressions. - */ - private Map> getFullPartitionSpecs( - CommonTree ast, Table tab, boolean canGroupExprs) throws SemanticException { - Map colTypes = new HashMap(); - for (FieldSchema fs : tab.getPartitionKeys()) { - colTypes.put(fs.getName().toLowerCase(), fs.getType()); - } - - Map> result = - new HashMap>(); - for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { - Tree partSpecTree = ast.getChild(childIndex); - if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) continue; - ExprNodeGenericFuncDesc expr = null; - HashSet names = new HashSet(partSpecTree.getChildCount()); - for (int i = 0; i < partSpecTree.getChildCount(); ++i) { - CommonTree partSpecSingleKey = (CommonTree) partSpecTree.getChild(i); - assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL); - String key = partSpecSingleKey.getChild(0).getText().toLowerCase(); - String operator = partSpecSingleKey.getChild(1).getText(); - String val = stripQuotes(partSpecSingleKey.getChild(2).getText()); - - String type = colTypes.get(key); - if (type == null) { - throw new SemanticException("Column " + key + " not found"); - } - // Create the corresponding hive expression to filter on partition columns. - PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type); - Converter converter = ObjectInspectorConverters.getConverter( - TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo), - TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti)); - ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true); - ExprNodeGenericFuncDesc op = makeBinaryPredicate( - operator, column, new ExprNodeConstantDesc(pti, converter.convert(val))); - // If it's multi-expr filter (e.g. a='5', b='2012-01-02'), AND with previous exprs. - expr = (expr == null) ? op : makeBinaryPredicate("and", expr, op); - names.add(key); - } - if (expr == null) continue; - // We got the expr for one full partition spec. Determine the prefix length. - int prefixLength = calculatePartPrefix(tab, names); - List orExpr = result.get(prefixLength); - // We have to tell apart partitions resulting from spec with different prefix lengths. - // So, if we already have smth for the same prefix length, we can OR the two. - // If we don't, create a new separate filter. In most cases there will only be one. 
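Editor's note: the comment above is the heart of getFullPartitionSpecs: each TOK_PARTSPEC becomes one AND-ed expression, and expressions whose specs cover the same leading partition columns (same prefix length) may be OR-ed into a single filter when grouping is allowed. A small stand-alone model of that bookkeeping, using strings in place of ExprNodeGenericFuncDesc:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PartitionFilterGroupingSketch {
        // expr: one already AND-ed predicate per partition spec (stand-in for the expr desc).
        // prefixLength: how many leading partition key columns the spec covers.
        static void addExpr(Map<Integer, List<String>> byPrefix, int prefixLength,
                            String expr, boolean canGroupExprs) {
            List<String> exprs = byPrefix.get(prefixLength);
            if (exprs == null) {
                byPrefix.put(prefixLength, new ArrayList<>(List.of(expr)));
            } else if (canGroupExprs) {
                // Same prefix length and grouping allowed: OR into the single existing filter.
                exprs.set(0, "(" + expr + ") or (" + exprs.get(0) + ")");
            } else {
                // Must check each spec individually (drop without IF EXISTS): keep it separate.
                exprs.add(expr);
            }
        }

        public static void main(String[] args) {
            Map<Integer, List<String>> byPrefix = new HashMap<>();
            addExpr(byPrefix, 2, "a='5' and b='2012-01-02'", true);
            addExpr(byPrefix, 2, "a='6' and b='2012-01-03'", true);
            addExpr(byPrefix, 1, "a='7'", true);
            // e.g. {1=[a='7'], 2=[(a='6' ...) or (a='5' ...)]}
            System.out.println(byPrefix);
        }
    }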
- if (orExpr == null) { - result.put(prefixLength, Lists.newArrayList(expr)); - } else if (canGroupExprs) { - orExpr.set(0, makeBinaryPredicate("or", expr, orExpr.get(0))); - } else { - orExpr.add(expr); - } - } - return result; - } - - private static ExprNodeGenericFuncDesc makeBinaryPredicate( - String fn, ExprNodeDesc left, ExprNodeDesc right) { - return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, - FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(left, right)); - } - - /** - * Calculates the partition prefix length based on the drop spec. - * This is used to avoid deleting archived partitions with lower level. - * For example, if, for A and B key cols, drop spec is A=5, B=6, we shouldn't drop - * archived A=5/, because it can contain B-s other than 6. - * @param tbl Table - * @param partSpecKeys Keys present in drop partition spec. - */ - private int calculatePartPrefix(Table tbl, HashSet partSpecKeys) { - int partPrefixToDrop = 0; - for (FieldSchema fs : tbl.getPartCols()) { - if (!partSpecKeys.contains(fs.getName())) break; - ++partPrefixToDrop; - } - return partPrefixToDrop; - } - - /** - * Certain partition values are are used by hive. e.g. the default partition - * in dynamic partitioning and the intermediate partition values used in the - * archiving process. Naturally, prohibit the user from creating partitions - * with these reserved values. The check that this function is more - * restrictive than the actual limitation, but it's simpler. Should be okay - * since the reserved names are fairly long and uncommon. - */ - private void validatePartitionValues(Map partSpec) - throws SemanticException { - - for (Entry e : partSpec.entrySet()) { - for (String s : reservedPartitionValues) { - if (e.getValue().contains(s)) { - throw new SemanticException(ErrorMsg.RESERVED_PART_VAL.getMsg( - "(User value: " + e.getValue() + " Reserved substring: " + s + ")")); - } - } - } - } - - /** - * Add the table partitions to be modified in the output, so that it is available for the - * pre-execution hook. If the partition does not exist, no error is thrown. - */ - private void addTablePartsOutputs(String tblName, List> partSpecs, - WriteEntity.WriteType writeType) - throws SemanticException { - addTablePartsOutputs(tblName, partSpecs, false, false, null, writeType); - } - - /** - * Add the table partitions to be modified in the output, so that it is available for the - * pre-execution hook. If the partition does not exist, no error is thrown. - */ - private void addTablePartsOutputs(String tblName, List> partSpecs, - boolean allowMany, WriteEntity.WriteType writeType) - throws SemanticException { - addTablePartsOutputs(tblName, partSpecs, false, allowMany, null, writeType); - } - - /** - * Add the table partitions to be modified in the output, so that it is available for the - * pre-execution hook. If the partition does not exist, throw an error if - * throwIfNonExistent is true, otherwise ignore it. 
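Editor's note: calculatePartPrefix above counts how many leading partition key columns the drop spec actually pins down; per its Javadoc, a spec of A=5, B=6 must not be treated as covering an archive rooted at A=5/, since that directory can hold other values of B. A self-contained version of the counting:

    import java.util.List;
    import java.util.Set;

    public class PartPrefixSketch {
        // Count leading partition key columns (in table order) that the drop spec mentions;
        // stop at the first column the spec does not pin down.
        static int calculatePartPrefix(List<String> partCols, Set<String> specKeys) {
            int prefix = 0;
            for (String col : partCols) {
                if (!specKeys.contains(col)) {
                    break;
                }
                prefix++;
            }
            return prefix;
        }

        public static void main(String[] args) {
            List<String> partCols = List.of("a", "b");
            System.out.println(calculatePartPrefix(partCols, Set.of("a", "b"))); // 2
            System.out.println(calculatePartPrefix(partCols, Set.of("a")));      // 1
            System.out.println(calculatePartPrefix(partCols, Set.of("b")));      // 0 (gap at 'a')
        }
    }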
- */ - private void addTablePartsOutputs(String tblName, List> partSpecs, - boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType) - throws SemanticException { - Table tab = getTable(tblName); - - Iterator> i; - int index; - for (i = partSpecs.iterator(), index = 1; i.hasNext(); ++index) { - Map partSpec = i.next(); - List parts = null; - if (allowMany) { - try { - parts = db.getPartitions(tab, partSpec); - } catch (HiveException e) { - LOG.error("Got HiveException during obtaining list of partitions" - + StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - } else { - parts = new ArrayList(); - try { - Partition p = db.getPartition(tab, partSpec, false); - if (p != null) { - parts.add(p); - } - } catch (HiveException e) { - LOG.debug("Wrong specification" + StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - } - if (parts.isEmpty()) { - if (throwIfNonExistent) { - throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(ast.getChild(index))); - } - } - for (Partition p : parts) { - // Don't request any locks here, as the table has already been locked. - outputs.add(new WriteEntity(p, writeType)); - } - } - } - - /** - * Add the table partitions to be modified in the output, so that it is available for the - * pre-execution hook. If the partition does not exist, throw an error if - * throwIfNonExistent is true, otherwise ignore it. - */ - private void addTableDropPartsOutputs(Table tab, - Collection> partSpecs, boolean throwIfNonExistent, - boolean ignoreProtection) throws SemanticException { - - for (List specs : partSpecs) { - for (ExprNodeGenericFuncDesc partSpec : specs) { - List parts = new ArrayList(); - boolean hasUnknown = false; - try { - hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, parts); - } catch (Exception e) { - throw new SemanticException( - ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()), e); - } - if (hasUnknown) { - throw new SemanticException( - "Unexpected unknown partitions for " + partSpec.getExprString()); - } - - // TODO: ifExists could be moved to metastore. In fact it already supports that. Check it - // for now since we get parts for output anyway, so we can get the error message - // earlier... If we get rid of output, we can get rid of this. - if (parts.isEmpty()) { - if (throwIfNonExistent) { - throw new SemanticException( - ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString())); - } - } - for (Partition p : parts) { - // TODO: same thing, metastore already checks this but check here if we can. - if (!ignoreProtection && !p.canDrop()) { - throw new SemanticException( - ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName())); - } - outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - } - } - } - - /** - * Analyze alter table's skewed table - * - * @param ast - * node - * @throws SemanticException - */ - private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { - /** - * Throw an error if the user tries to use the DDL with - * hive.internal.ddl.list.bucketing.enable set to false. 
- */ - HiveConf hiveConf = SessionState.get().getConf(); - - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - Table tab = getTable(tableName, true); - - inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); - - validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); - - if (ast.getChildCount() == 1) { - /* Convert a skewed table to non-skewed table. */ - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, - new ArrayList(), new ArrayList>()); - alterTblDesc.setStoredAsSubDirectories(false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } else { - switch (((ASTNode) ast.getChild(1)).getToken().getType()) { - case HiveParser.TOK_TABLESKEWED: - handleAlterTableSkewedBy(ast, tableName, tab); - break; - case HiveParser.TOK_STOREDASDIRS: - handleAlterTableDisableStoredAsDirs(tableName, tab); - break; - default: - assert false; - } - } - } - - /** - * Handle alter table not stored as directories - * - * @param tableName - * @param tab - * @throws SemanticException - */ - private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) - throws SemanticException { - List skewedColNames = tab.getSkewedColNames(); - List> skewedColValues = tab.getSkewedColValues(); - if ((skewedColNames == null) || (skewedColNames.size() == 0) || (skewedColValues == null) - || (skewedColValues.size() == 0)) { - throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName)); - } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, - skewedColNames, skewedColValues); - alterTblDesc.setStoredAsSubDirectories(false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - /** - * Process "alter table skewed by .. on .. stored as directories - * @param ast - * @param tableName - * @param tab - * @throws SemanticException - */ - private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) - throws SemanticException { - List skewedColNames = new ArrayList(); - List> skewedValues = new ArrayList>(); - /* skewed column names. */ - ASTNode skewedNode = (ASTNode) ast.getChild(1); - skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode); - /* skewed value. 
*/ - analyzeDDLSkewedValues(skewedValues, skewedNode); - // stored as directories - boolean storedAsDirs = analyzeStoredAdDirs(skewedNode); - - - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, - skewedColNames, skewedValues); - alterTblDesc.setStoredAsSubDirectories(storedAsDirs); - /** - * Validate information about skewed table - */ - alterTblDesc.setTable(tab); - alterTblDesc.validate(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - /** - * Analyze skewed column names - * - * @param skewedColNames - * @param child - * @return - * @throws SemanticException - */ - private List analyzeAlterTableSkewedColNames(List skewedColNames, - ASTNode child) throws SemanticException { - Tree nNode = child.getChild(0); - if (nNode == null) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - ASTNode nAstNode = (ASTNode) nNode; - if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - skewedColNames = getColumnNames(nAstNode); - } - } - return skewedColNames; - } - - /** - * Given a ASTNode, return list of values. - * - * use case: - * create table xyz list bucketed (col1) with skew (1,2,5) - * AST Node is for (1,2,5) - * - * @param ast - * @return - */ - private List getColumnValues(ASTNode ast) { - List colList = new ArrayList(); - int numCh = ast.getChildCount(); - for (int i = 0; i < numCh; i++) { - ASTNode child = (ASTNode) ast.getChild(i); - colList.add(stripQuotes(child.getText()).toLowerCase()); - } - return colList; - } - - - /** - * Analyze alter table's skewed location - * - * @param ast - * @param tableName - * @param partSpec - * @throws SemanticException - */ - private void analyzeAlterTableSkewedLocation(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { - /** - * Throw an error if the user tries to use the DDL with - * hive.internal.ddl.list.bucketing.enable set to false. 
- */ - HiveConf hiveConf = SessionState.get().getConf(); - /** - * Retrieve mappings from parser - */ - Map, String> locations = new HashMap, String>(); - ArrayList locNodes = ast.getChildren(); - if (null == locNodes) { - throw new SemanticException(ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC.getMsg()); - } else { - for (Node locNode : locNodes) { - // TOK_SKEWED_LOCATIONS - ASTNode locAstNode = (ASTNode) locNode; - ArrayList locListNodes = locAstNode.getChildren(); - if (null == locListNodes) { - throw new SemanticException(ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC.getMsg()); - } else { - for (Node locListNode : locListNodes) { - // TOK_SKEWED_LOCATION_LIST - ASTNode locListAstNode = (ASTNode) locListNode; - ArrayList locMapNodes = locListAstNode.getChildren(); - if (null == locMapNodes) { - throw new SemanticException(ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC.getMsg()); - } else { - for (Node locMapNode : locMapNodes) { - // TOK_SKEWED_LOCATION_MAP - ASTNode locMapAstNode = (ASTNode) locMapNode; - ArrayList locMapAstNodeMaps = locMapAstNode.getChildren(); - if ((null == locMapAstNodeMaps) || (locMapAstNodeMaps.size() != 2)) { - throw new SemanticException(ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_MAP.getMsg()); - } else { - List keyList = new LinkedList(); - ASTNode node = (ASTNode) locMapAstNodeMaps.get(0); - if (node.getToken().getType() == HiveParser.TOK_TABCOLVALUES) { - keyList = getSkewedValuesFromASTNode(node); - } else if (isConstant(node)) { - keyList.add(PlanUtils - .stripQuotes(node.getText())); - } else { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE.getMsg()); - } - String newLocation = PlanUtils - .stripQuotes(unescapeSQLString(((ASTNode) locMapAstNodeMaps.get(1)) - .getText())); - validateSkewedLocationString(newLocation); - locations.put(keyList, newLocation); - addLocationToOutputs(newLocation); - } - } - } - } - } - } - } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, locations, partSpec); - addInputsOutputsAlterTable(tableName, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); - } - - private void addLocationToOutputs(String newLocation) { - outputs.add(new WriteEntity(new Path(newLocation), FileUtils.isLocalFile(conf, newLocation))); - } - - /** - * Check if the node is constant. - * - * @param node - * @return - */ - private boolean isConstant(ASTNode node) { - boolean result = false; - switch(node.getToken().getType()) { - case HiveParser.Number: - result = true; - break; - case HiveParser.StringLiteral: - result = true; - break; - case HiveParser.BigintLiteral: - result = true; - break; - case HiveParser.SmallintLiteral: - result = true; - break; - case HiveParser.TinyintLiteral: - result = true; - break; - case HiveParser.DecimalLiteral: - result = true; - break; - case HiveParser.CharSetName: - result = true; - break; - case HiveParser.KW_TRUE: - case HiveParser.KW_FALSE: - result = true; - break; - default: - break; - } - return result; - } - - private void validateSkewedLocationString(String newLocation) throws SemanticException { - /* Validate location string. */ - try { - URI locUri = new URI(newLocation); - if (!locUri.isAbsolute() || locUri.getScheme() == null - || locUri.getScheme().trim().equals("")) { - throw new SemanticException( - newLocation - + " is not absolute or has no scheme information. 
" - + "Please specify a complete absolute uri with scheme information."); - } - } catch (URISyntaxException e) { - throw new SemanticException(e); - } - } + private static final Log LOG = LogFactory.getLog(DDLSemanticAnalyzer.class); + private static final Map TokenToTypeName = new HashMap(); + + private final Set reservedPartitionValues; + private final HiveAuthorizationTaskFactory hiveAuthorizationTaskFactory; + + static { + TokenToTypeName.put(HiveParser.TOK_BOOLEAN, + serdeConstants.BOOLEAN_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_TINYINT, + serdeConstants.TINYINT_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_SMALLINT, + serdeConstants.SMALLINT_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_INT, serdeConstants.INT_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_BIGINT, + serdeConstants.BIGINT_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_FLOAT, + serdeConstants.FLOAT_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_DOUBLE, + serdeConstants.DOUBLE_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_STRING, + serdeConstants.STRING_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_CHAR, serdeConstants.CHAR_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_VARCHAR, + serdeConstants.VARCHAR_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_BINARY, + serdeConstants.BINARY_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_DATETIME, + serdeConstants.DATETIME_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, + serdeConstants.TIMESTAMP_TYPE_NAME); + TokenToTypeName.put(HiveParser.TOK_DECIMAL, + serdeConstants.DECIMAL_TYPE_NAME); + } + + public static String getTypeName(ASTNode node) throws SemanticException { + int token = node.getType(); + String typeName; + + // datetime type isn't currently supported + if (token == HiveParser.TOK_DATETIME) { + throw new SemanticException(ErrorMsg.UNSUPPORTED_TYPE.getMsg()); + } + + switch (token) { + case HiveParser.TOK_CHAR: + CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(node); + typeName = charTypeInfo.getQualifiedName(); + break; + case HiveParser.TOK_VARCHAR: + VarcharTypeInfo varcharTypeInfo = ParseUtils + .getVarcharTypeInfo(node); + typeName = varcharTypeInfo.getQualifiedName(); + break; + case HiveParser.TOK_DECIMAL: + DecimalTypeInfo decTypeInfo = ParseUtils + .getDecimalTypeTypeInfo(node); + typeName = decTypeInfo.getQualifiedName(); + break; + default: + typeName = TokenToTypeName.get(token); + } + return typeName; + } + + static class TablePartition { + String tableName; + HashMap partSpec = null; + + public TablePartition() { + } + + public TablePartition(ASTNode tblPart) throws SemanticException { + tableName = unescapeIdentifier(tblPart.getChild(0).getText()); + if (tblPart.getChildCount() > 1) { + ASTNode part = (ASTNode) tblPart.getChild(1); + if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) { + this.partSpec = DDLSemanticAnalyzer.getPartSpec(part); + } + } + } + } + + public DDLSemanticAnalyzer(HiveConf conf) throws SemanticException { + this(conf, createHiveDB(conf)); + } + + public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { + super(conf, db); + reservedPartitionValues = new HashSet(); + // Partition can't have this name + reservedPartitionValues.add(HiveConf.getVar(conf, + ConfVars.DEFAULTPARTITIONNAME)); + reservedPartitionValues.add(HiveConf.getVar(conf, + ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME)); + // Partition value can't end in this suffix + reservedPartitionValues.add(HiveConf.getVar(conf, + 
ConfVars.METASTORE_INT_ORIGINAL)); + reservedPartitionValues.add(HiveConf.getVar(conf, + ConfVars.METASTORE_INT_ARCHIVED)); + reservedPartitionValues.add(HiveConf.getVar(conf, + ConfVars.METASTORE_INT_EXTRACTED)); + hiveAuthorizationTaskFactory = new HiveAuthorizationTaskFactoryImpl( + conf, db); + } + + @Override + public void analyzeInternal(ASTNode ast) throws SemanticException { + + switch (ast.getToken().getType()) { + case HiveParser.TOK_ALTERTABLE_PARTITION: { + ASTNode tablePart = (ASTNode) ast.getChild(0); + TablePartition tblPart = new TablePartition(tablePart); + String tableName = tblPart.tableName; + HashMap partSpec = tblPart.partSpec; + ast = (ASTNode) ast.getChild(1); + if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){ + this.analyzeAlterTableUpdateStats(ast,tblPart); + } + else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { + analyzeAlterTableFileFormat(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) { + analyzeAlterTableProtectMode(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { + analyzeAlterTableLocation(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { + analyzeAlterTablePartMergeFiles(tablePart, ast, tableName, + partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) { + analyzeAlterTableSerde(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { + analyzeAlterTableSerdeProps(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { + analyzeAlterTableRenamePart(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION) { + analyzeAlterTableSkewedLocation(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) { + analyzeAlterTableBucketNum(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { + analyzeAlterTableClusterSort(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_COMPACT) { + analyzeAlterTableCompact(ast, tableName, partSpec); + } + break; + } + case HiveParser.TOK_DROPTABLE: + analyzeDropTable(ast, false); + break; + case HiveParser.TOK_TRUNCATETABLE: + analyzeTruncateTable(ast); + break; + case HiveParser.TOK_CREATEINDEX: + analyzeCreateIndex(ast); + break; + case HiveParser.TOK_DROPINDEX: + analyzeDropIndex(ast); + break; + case HiveParser.TOK_DESCTABLE: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeDescribeTable(ast); + break; + case HiveParser.TOK_SHOWDATABASES: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowDatabases(ast); + break; + case HiveParser.TOK_SHOWTABLES: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowTables(ast); + break; + case HiveParser.TOK_SHOWCOLUMNS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowColumns(ast); + break; + case HiveParser.TOK_SHOW_TABLESTATUS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowTableStatus(ast); + break; + case HiveParser.TOK_SHOW_TBLPROPERTIES: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowTableProperties(ast); + break; + case HiveParser.TOK_SHOWFUNCTIONS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowFunctions(ast); + break; + case HiveParser.TOK_SHOWLOCKS: + ctx.setResFile(ctx.getLocalTmpPath()); + 
analyzeShowLocks(ast); + break; + case HiveParser.TOK_SHOWDBLOCKS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowDbLocks(ast); + break; + case HiveParser.TOK_SHOW_COMPACTIONS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowCompactions(ast); + break; + case HiveParser.TOK_SHOW_TRANSACTIONS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowTxns(ast); + break; + case HiveParser.TOK_SHOWCONF: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowConf(ast); + break; + case HiveParser.TOK_DESCFUNCTION: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeDescFunction(ast); + break; + case HiveParser.TOK_DESCDATABASE: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeDescDatabase(ast); + break; + case HiveParser.TOK_MSCK: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeMetastoreCheck(ast); + break; + case HiveParser.TOK_DROPVIEW: + analyzeDropTable(ast, true); + break; + case HiveParser.TOK_ALTERVIEW_PROPERTIES: + analyzeAlterTableProps(ast, true, false); + break; + case HiveParser.TOK_DROPVIEW_PROPERTIES: + analyzeAlterTableProps(ast, true, true); + break; + case HiveParser.TOK_ALTERVIEW_ADDPARTS: + // for ALTER VIEW ADD PARTITION, we wrapped the ADD to discriminate + // view from table; unwrap it now + analyzeAlterTableAddParts((ASTNode) ast.getChild(0), true); + break; + case HiveParser.TOK_ALTERVIEW_DROPPARTS: + // for ALTER VIEW DROP PARTITION, we wrapped the DROP to + // discriminate + // view from table; unwrap it now + analyzeAlterTableDropParts((ASTNode) ast.getChild(0), true); + break; + case HiveParser.TOK_ALTERVIEW_RENAME: + // for ALTER VIEW RENAME, we wrapped the RENAME to discriminate + // view from table; unwrap it now + analyzeAlterTableRename(((ASTNode) ast.getChild(0)), true); + break; + case HiveParser.TOK_ALTERTABLE_RENAME: + analyzeAlterTableRename(ast, false); + break; + case HiveParser.TOK_ALTERTABLE_TOUCH: + analyzeAlterTableTouch(ast); + break; + case HiveParser.TOK_ALTERTABLE_ARCHIVE: + analyzeAlterTableArchive(ast, false); + break; + case HiveParser.TOK_ALTERTABLE_UNARCHIVE: + analyzeAlterTableArchive(ast, true); + break; + case HiveParser.TOK_ALTERTABLE_ADDCOLS: + analyzeAlterTableModifyCols(ast, AlterTableTypes.ADDCOLS); + break; + case HiveParser.TOK_ALTERTABLE_REPLACECOLS: + analyzeAlterTableModifyCols(ast, AlterTableTypes.REPLACECOLS); + break; + case HiveParser.TOK_ALTERTABLE_RENAMECOL: + analyzeAlterTableRenameCol(ast); + break; + case HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS: + analyzeAlterTableUpdateStats(ast); + break; + case HiveParser.TOK_ALTERTABLE_ADDPARTS: + analyzeAlterTableAddParts(ast, false); + break; + case HiveParser.TOK_ALTERTABLE_DROPPARTS: + analyzeAlterTableDropParts(ast, false); + break; + case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: + analyzeAlterTablePartColType(ast); + break; + case HiveParser.TOK_ALTERTABLE_PROPERTIES: + analyzeAlterTableProps(ast, false, false); + break; + case HiveParser.TOK_DROPTABLE_PROPERTIES: + analyzeAlterTableProps(ast, false, true); + break; + case HiveParser.TOK_ALTERINDEX_REBUILD: + analyzeAlterIndexRebuild(ast); + break; + case HiveParser.TOK_ALTERINDEX_PROPERTIES: + analyzeAlterIndexProps(ast); + break; + case HiveParser.TOK_SHOWPARTITIONS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowPartitions(ast); + break; + case HiveParser.TOK_SHOW_CREATETABLE: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowCreateTable(ast); + break; + case HiveParser.TOK_SHOWINDEXES: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowIndexes(ast); + break; + case HiveParser.TOK_LOCKTABLE: + 
analyzeLockTable(ast); + break; + case HiveParser.TOK_UNLOCKTABLE: + analyzeUnlockTable(ast); + break; + case HiveParser.TOK_LOCKDB: + analyzeLockDatabase(ast); + break; + case HiveParser.TOK_UNLOCKDB: + analyzeUnlockDatabase(ast); + break; + case HiveParser.TOK_CREATEDATABASE: + analyzeCreateDatabase(ast); + break; + case HiveParser.TOK_DROPDATABASE: + analyzeDropDatabase(ast); + break; + case HiveParser.TOK_SWITCHDATABASE: + analyzeSwitchDatabase(ast); + break; + case HiveParser.TOK_ALTERDATABASE_PROPERTIES: + analyzeAlterDatabaseProperties(ast); + break; + case HiveParser.TOK_ALTERDATABASE_OWNER: + analyzeAlterDatabaseOwner(ast); + break; + case HiveParser.TOK_CREATEROLE: + analyzeCreateRole(ast); + break; + case HiveParser.TOK_DROPROLE: + analyzeDropRole(ast); + break; + case HiveParser.TOK_SHOW_ROLE_GRANT: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowRoleGrant(ast); + break; + case HiveParser.TOK_SHOW_ROLE_PRINCIPALS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowRolePrincipals(ast); + break; + case HiveParser.TOK_SHOW_ROLES: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowRoles(ast); + break; + case HiveParser.TOK_GRANT_ROLE: + analyzeGrantRevokeRole(true, ast); + break; + case HiveParser.TOK_REVOKE_ROLE: + analyzeGrantRevokeRole(false, ast); + break; + case HiveParser.TOK_GRANT: + analyzeGrant(ast); + break; + case HiveParser.TOK_SHOW_GRANT: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowGrant(ast); + break; + case HiveParser.TOK_REVOKE: + analyzeRevoke(ast); + break; + case HiveParser.TOK_ALTERTABLE_SKEWED: + analyzeAltertableSkewedby(ast); + break; + case HiveParser.TOK_EXCHANGEPARTITION: + analyzeExchangePartition(ast); + break; + case HiveParser.TOK_SHOW_SET_ROLE: + analyzeSetShowRole(ast); + break; + default: + throw new SemanticException("Unsupported command."); + } + if (fetchTask != null && !rootTasks.isEmpty()) { + rootTasks.get(rootTasks.size() - 1).setFetchSource(true); + } + } + + private void analyzeSetShowRole(ASTNode ast) throws SemanticException { + switch (ast.getChildCount()) { + case 0: + ctx.setResFile(ctx.getLocalTmpPath()); + rootTasks.add(hiveAuthorizationTaskFactory + .createShowCurrentRoleTask(getInputs(), getOutputs(), + ctx.getResFile())); + setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema())); + break; + case 1: + rootTasks.add(hiveAuthorizationTaskFactory.createSetRoleTask( + BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0) + .getText()), getInputs(), getOutputs())); + break; + default: + throw new SemanticException( + "Internal error. ASTNode expected to have 0 or 1 child. 
" + + ast.dump()); + } + } + + private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) + throws SemanticException { + Task task; + if (grant) { + task = hiveAuthorizationTaskFactory.createGrantRoleTask(ast, + getInputs(), getOutputs()); + } else { + task = hiveAuthorizationTaskFactory.createRevokeRoleTask(ast, + getInputs(), getOutputs()); + } + if (task != null) { + rootTasks.add(task); + } + } + + private void analyzeShowGrant(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createShowGrantTask(ast, ctx.getResFile(), getInputs(), + getOutputs()); + if (task != null) { + rootTasks.add(task); + setFetchTask(createFetchTask(ShowGrantDesc.getSchema())); + } + } + + private void analyzeGrant(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createGrantTask(ast, getInputs(), getOutputs()); + if (task != null) { + rootTasks.add(task); + } + } + + private void analyzeRevoke(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createRevokeTask(ast, getInputs(), getOutputs()); + if (task != null) { + rootTasks.add(task); + } + } + + private void analyzeCreateRole(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createCreateRoleTask(ast, getInputs(), getOutputs()); + if (task != null) { + rootTasks.add(task); + } + } + + private void analyzeDropRole(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createDropRoleTask(ast, getInputs(), getOutputs()); + if (task != null) { + rootTasks.add(task); + } + } + + private void analyzeShowRoleGrant(ASTNode ast) throws SemanticException { + Task task = hiveAuthorizationTaskFactory + .createShowRoleGrantTask(ast, ctx.getResFile(), getInputs(), + getOutputs()); + if (task != null) { + rootTasks.add(task); + setFetchTask(createFetchTask(RoleDDLDesc.getRoleShowGrantSchema())); + } + } + + private void analyzeShowRolePrincipals(ASTNode ast) + throws SemanticException { + Task roleDDLTask = (Task) hiveAuthorizationTaskFactory + .createShowRolePrincipalsTask(ast, ctx.getResFile(), + getInputs(), getOutputs()); + + if (roleDDLTask != null) { + rootTasks.add(roleDDLTask); + setFetchTask(createFetchTask(RoleDDLDesc + .getShowRolePrincipalsSchema())); + } + } + + private void analyzeShowRoles(ASTNode ast) throws SemanticException { + Task roleDDLTask = (Task) hiveAuthorizationTaskFactory + .createShowRolesTask(ast, ctx.getResFile(), getInputs(), + getOutputs()); + + if (roleDDLTask != null) { + rootTasks.add(roleDDLTask); + setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema())); + } + } + + private void analyzeAlterDatabaseProperties(ASTNode ast) + throws SemanticException { + + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + Map dbProps = null; + + for (int i = 1; i < ast.getChildCount(); i++) { + ASTNode childNode = (ASTNode) ast.getChild(i); + switch (childNode.getToken().getType()) { + case HiveParser.TOK_DATABASEPROPERTIES: + dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode + .getChild(0)); + break; + default: + throw new SemanticException( + "Unrecognized token in CREATE DATABASE statement"); + } + } + AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, dbProps); + addAlterDbDesc(alterDesc); + } + + private void addAlterDbDesc(AlterDatabaseDesc alterDesc) + throws SemanticException { + Database database = getDatabase(alterDesc.getDatabaseName()); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); + 
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterDesc), conf)); + } + + private void analyzeAlterDatabaseOwner(ASTNode ast) + throws SemanticException { + String dbName = getUnescapedName((ASTNode) ast.getChild(0)); + PrincipalDesc principalDesc = AuthorizationParseUtils + .getPrincipalDesc((ASTNode) ast.getChild(1)); + + // The syntax should not allow these fields to be null, but lets verify + String nullCmdMsg = "can't be null in alter database set owner command"; + if (principalDesc.getName() == null) { + throw new SemanticException("Owner name " + nullCmdMsg); + } + if (principalDesc.getType() == null) { + throw new SemanticException("Owner type " + nullCmdMsg); + } + + AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, + principalDesc); + addAlterDbDesc(alterDesc); + } + + private void analyzeExchangePartition(ASTNode ast) throws SemanticException { + Table destTable = getTable(getUnescapedName((ASTNode) ast.getChild(0))); + Table sourceTable = getTable(getUnescapedName((ASTNode) ast.getChild(2))); + + // Get the partition specs + Map partSpecs = getPartSpec((ASTNode) ast.getChild(1)); + validatePartitionValues(partSpecs); + boolean sameColumns = MetaStoreUtils.compareFieldColumns( + destTable.getAllCols(), sourceTable.getAllCols()); + boolean samePartitions = MetaStoreUtils.compareFieldColumns( + destTable.getPartitionKeys(), sourceTable.getPartitionKeys()); + if (!sameColumns || !samePartitions) { + throw new SemanticException( + ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg()); + } + // check if source partition exists + getPartitions(sourceTable, partSpecs, true); + + // Verify that the partitions specified are continuous + // If a subpartition value is specified without specifying a partition's + // value + // then we throw an exception + int counter = isPartitionValueContinuous( + sourceTable.getPartitionKeys(), partSpecs); + if (counter < 0) { + throw new SemanticException( + ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs + .toString())); + } + List destPartitions = null; + try { + destPartitions = getPartitions(destTable, partSpecs, true); + } catch (SemanticException ex) { + // We should expect a semantic exception being throw as this + // partition + // should not be present. + } + if (destPartitions != null) { + // If any destination partition is present then throw a Semantic + // Exception. + throw new SemanticException( + ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString())); + } + AlterTableExchangePartition alterTableExchangePartition = new AlterTableExchangePartition( + sourceTable, destTable, partSpecs); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTableExchangePartition), conf)); + } + + /** + * @param partitionKeys + * the list of partition keys of the table + * @param partSpecs + * the partition specs given by the user + * @return >=0 if no subpartition value is specified without a partition's + * value being specified else it returns -1 + */ + private int isPartitionValueContinuous(List partitionKeys, + Map partSpecs) { + int counter = 0; + for (FieldSchema partitionKey : partitionKeys) { + if (partSpecs.containsKey(partitionKey.getName())) { + counter++; + continue; + } + return partSpecs.size() == counter ? 
counter : -1; + } + return counter; + } + + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifNotExists = false; + String dbComment = null; + String dbLocation = null; + Map dbProps = null; + + for (int i = 1; i < ast.getChildCount(); i++) { + ASTNode childNode = (ASTNode) ast.getChild(i); + switch (childNode.getToken().getType()) { + case HiveParser.TOK_IFNOTEXISTS: + ifNotExists = true; + break; + case HiveParser.TOK_DATABASECOMMENT: + dbComment = unescapeSQLString(childNode.getChild(0).getText()); + break; + case TOK_DATABASEPROPERTIES: + dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode + .getChild(0)); + break; + case TOK_DATABASELOCATION: + dbLocation = unescapeSQLString(childNode.getChild(0).getText()); + addLocationToOutputs(dbLocation); + break; + default: + throw new SemanticException( + "Unrecognized token in CREATE DATABASE statement"); + } + } + + CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(dbName, + dbComment, dbLocation, ifNotExists); + if (dbProps != null) { + createDatabaseDesc.setDatabaseProperties(dbProps); + } + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createDatabaseDesc), conf)); + } + + private void analyzeDropDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + boolean ifExists = false; + boolean ifCascade = false; + + if (null != ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS)) { + ifExists = true; + } + + if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { + ifCascade = true; + } + + Database database = getDatabase(dbName, !ifExists); + if (database == null) { + return; + } + + // if cascade=true, then we need to authorize the drop table action as + // well + if (ifCascade) { + // add the tables as well to outputs + List tableNames; + // get names of all tables under this dbName + try { + tableNames = db.getAllTables(dbName); + } catch (HiveException e) { + throw new SemanticException(e); + } + // add tables to outputs + if (tableNames != null) { + for (String tableName : tableNames) { + Table table = getTable(dbName, tableName, true); + // We want no lock here, as the database lock will cover the + // tables, + // and putting a lock will actually cause us to deadlock on + // ourselves. 
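// ---------------------------------------------------------------------------
// Editor's note (not part of this patch): the isPartitionValueContinuous
// helper just above accepts an EXCHANGE PARTITION spec only when it covers a
// leading prefix of the table's partition keys; as soon as a key is missing,
// any further specified key makes the spec discontinuous and -1 is returned.
// The snippet below is a simplified, self-contained sketch of that check,
// using plain Java collections in place of Hive's FieldSchema; the key and
// value names are made up for illustration.
// ---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartitionPrefixSketch {

  // Returns the number of leading partition keys covered by the spec, or -1
  // if a later key is specified while an earlier one is missing.
  static int prefixLength(List<String> partitionKeys, Map<String, String> partSpec) {
    int counter = 0;
    for (String key : partitionKeys) {
      if (partSpec.containsKey(key)) {
        counter++;
        continue;
      }
      // First missing key: valid only if nothing was specified beyond it.
      return partSpec.size() == counter ? counter : -1;
    }
    return counter;
  }

  public static void main(String[] args) {
    List<String> keys = Arrays.asList("ds", "hr", "min");

    Map<String, String> ok = new LinkedHashMap<>();
    ok.put("ds", "2014-01-01");                  // prefix (ds only)
    Map<String, String> gap = new LinkedHashMap<>();
    gap.put("hr", "12");                         // hr without ds

    System.out.println(prefixLength(keys, ok));  // 1
    System.out.println(prefixLength(keys, gap)); // -1
  }
}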
+ outputs.add(new WriteEntity(table, + WriteEntity.WriteType.DDL_NO_LOCK)); + } + } + } + inputs.add(new ReadEntity(database)); + outputs.add(new WriteEntity(database, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + + DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, + ifExists, ifCascade); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + dropDatabaseDesc), conf)); + } + + private void analyzeSwitchDatabase(ASTNode ast) { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + switchDatabaseDesc), conf)); + } + + private void analyzeDropTable(ASTNode ast, boolean expectView) + throws SemanticException { + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); + // we want to signal an error if the table/view doesn't exist and we're + // configured not to fail silently + boolean throwException = !ifExists + && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); + Table tab = getTable(tableName, throwException); + if (tab != null) { + inputs.add(new ReadEntity(tab)); + outputs.add(new WriteEntity(tab, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } + + DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, + ifExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + dropTblDesc), conf)); + } + + private void analyzeTruncateTable(ASTNode ast) throws SemanticException { + ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION + String tableName = getUnescapedName((ASTNode) root.getChild(0)); + + Table table = getTable(tableName, true); + if (table.getTableType() != TableType.MANAGED_TABLE) { + throw new SemanticException( + ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName)); + } + if (table.isNonNative()) { + throw new SemanticException( + ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); // TODO + } + if (!table.isPartitioned() && root.getChildCount() > 1) { + throw new SemanticException( + ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE + .format(tableName)); + } + Map partSpec = getPartSpec((ASTNode) root.getChild(1)); + if (partSpec == null) { + if (!table.isPartitioned()) { + outputs.add(new WriteEntity(table, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } else { + for (Partition partition : getPartitions(table, null, false)) { + outputs.add(new WriteEntity(partition, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } + } + } else { + if (isFullSpec(table, partSpec)) { + Partition partition = getPartition(table, partSpec, true); + outputs.add(new WriteEntity(partition, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } else { + for (Partition partition : getPartitions(table, partSpec, false)) { + outputs.add(new WriteEntity(partition, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } + } + } + + TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, + partSpec); + + DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), + truncateTblDesc); + Task truncateTask = TaskFactory.get(ddlWork, + conf); + + // Is this a truncate column command + List columnNames = null; + if (ast.getChildCount() == 2) { + try { + columnNames = getColumnNames((ASTNode) ast.getChild(1)); + + // Throw an error if the table is indexed + List indexes = db.getIndexes(table.getDbName(), + tableName, (short) 1); + if (indexes != null && indexes.size() > 0) { + throw new 
SemanticException( + ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); + } + + List bucketCols = null; + Class inputFormatClass = null; + boolean isArchived = false; + Path newTblPartLoc = null; + Path oldTblPartLoc = null; + List cols = null; + ListBucketingCtx lbCtx = null; + boolean isListBucketed = false; + List listBucketColNames = null; + + if (table.isPartitioned()) { + Partition part = db.getPartition(table, partSpec, false); + + Path tabPath = table.getPath(); + Path partPath = part.getDataLocation(); + + // if the table is in a different dfs than the partition, + // replace the partition's dfs with the table's dfs. + newTblPartLoc = new Path(tabPath.toUri().getScheme(), + tabPath.toUri().getAuthority(), partPath.toUri() + .getPath()); + + oldTblPartLoc = partPath; + + cols = part.getCols(); + bucketCols = part.getBucketCols(); + inputFormatClass = part.getInputFormatClass(); + isArchived = ArchiveUtils.isArchived(part); + lbCtx = constructListBucketingCtx(part.getSkewedColNames(), + part.getSkewedColValues(), + part.getSkewedColValueLocationMaps(), + part.isStoredAsSubDirectories(), conf); + isListBucketed = part.isStoredAsSubDirectories(); + listBucketColNames = part.getSkewedColNames(); + } else { + // input and output are the same + oldTblPartLoc = table.getPath(); + newTblPartLoc = table.getPath(); + cols = table.getCols(); + bucketCols = table.getBucketCols(); + inputFormatClass = table.getInputFormatClass(); + lbCtx = constructListBucketingCtx( + table.getSkewedColNames(), + table.getSkewedColValues(), + table.getSkewedColValueLocationMaps(), + table.isStoredAsSubDirectories(), conf); + isListBucketed = table.isStoredAsSubDirectories(); + listBucketColNames = table.getSkewedColNames(); + } + + // throw a HiveException for non-rcfile. 
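// ---------------------------------------------------------------------------
// Editor's note (not part of this patch): when truncating columns of a
// partitioned table, the code above rebuilds the partition location so that
// output lands on the table's filesystem: it keeps the partition's path but
// takes the scheme and authority from the table path. The sketch below shows
// the same URI surgery with java.net.URI only; the hostnames and paths are
// invented for illustration.
// ---------------------------------------------------------------------------
import java.net.URI;
import java.net.URISyntaxException;

public class RebasePartitionPathSketch {

  // Keep the partition's path, but take scheme and authority from the table
  // location, mirroring new Path(tabScheme, tabAuthority, partPath) above.
  static URI rebase(URI tableLocation, URI partitionLocation) throws URISyntaxException {
    return new URI(tableLocation.getScheme(),
                   tableLocation.getAuthority(),
                   partitionLocation.getPath(),
                   null, null);
  }

  public static void main(String[] args) throws URISyntaxException {
    URI table = new URI("hdfs://nn-new:8020/warehouse/t");
    URI part  = new URI("hdfs://nn-old:8020/warehouse/t/ds=2014-01-01");
    // prints hdfs://nn-new:8020/warehouse/t/ds=2014-01-01
    System.out.println(rebase(table, part));
  }
}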
+ if (!inputFormatClass.equals(RCFileInputFormat.class)) { + throw new SemanticException( + ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg()); + } + + // throw a HiveException if the table/partition is archived + if (isArchived) { + throw new SemanticException( + ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg()); + } + + Set columnIndexes = new HashSet(); + for (String columnName : columnNames) { + boolean found = false; + for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) { + if (columnName.equalsIgnoreCase(cols.get(columnIndex) + .getName())) { + columnIndexes.add(columnIndex); + found = true; + break; + } + } + // Throw an exception if the user is trying to truncate a + // column which doesn't exist + if (!found) { + throw new SemanticException( + ErrorMsg.INVALID_COLUMN.getMsg(columnName)); + } + // Throw an exception if the table/partition is bucketed on + // one of the columns + for (String bucketCol : bucketCols) { + if (bucketCol.equalsIgnoreCase(columnName)) { + throw new SemanticException( + ErrorMsg.TRUNCATE_BUCKETED_COLUMN + .getMsg(columnName)); + } + } + if (isListBucketed) { + for (String listBucketCol : listBucketColNames) { + if (listBucketCol.equalsIgnoreCase(columnName)) { + throw new SemanticException( + ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN + .getMsg(columnName)); + } + } + } + } + + truncateTblDesc.setColumnIndexes(new ArrayList( + columnIndexes)); + + truncateTblDesc.setInputDir(oldTblPartLoc); + addInputsOutputsAlterTable(tableName, partSpec); + + truncateTblDesc.setLbCtx(lbCtx); + + addInputsOutputsAlterTable(tableName, partSpec); + ddlWork.setNeedLock(true); + TableDesc tblDesc = Utilities.getTableDesc(table); + // Write the output to temporary directory and move it to the + // final location at the end + // so the operation is atomic. + Path queryTmpdir = ctx + .getExternalTmpPath(newTblPartLoc.toUri()); + truncateTblDesc.setOutputDir(queryTmpdir); + LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, + partSpec == null ? 
new HashMap() + : partSpec); + ltd.setLbCtx(lbCtx); + Task moveTsk = TaskFactory.get(new MoveWork(null, + null, ltd, null, false), conf); + truncateTask.addDependentTask(moveTsk); + + // Recalculate the HDFS stats if auto gather stats is set + if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsWork statDesc; + if (oldTblPartLoc.equals(newTblPartLoc)) { + // If we're merging to the same location, we can avoid + // some metastore calls + tableSpec tablepart = new tableSpec(this.db, conf, root); + statDesc = new StatsWork(tablepart); + } else { + statDesc = new StatsWork(ltd); + } + statDesc.setNoStatsAggregator(true); + statDesc.setClearAggregatorStats(true); + statDesc.setStatsReliable(conf + .getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); + Task statTask = TaskFactory.get( + statDesc, conf); + moveTsk.addDependentTask(statTask); + } + } catch (HiveException e) { + throw new SemanticException(e); + } + } + + rootTasks.add(truncateTask); + } + + private boolean isFullSpec(Table table, Map partSpec) { + for (FieldSchema partCol : table.getPartCols()) { + if (partSpec.get(partCol.getName()) == null) { + return false; + } + } + return true; + } + + private void analyzeCreateIndex(ASTNode ast) throws SemanticException { + String indexName = unescapeIdentifier(ast.getChild(0).getText()); + String typeName = unescapeSQLString(ast.getChild(1).getText()); + String tableName = getUnescapedName((ASTNode) ast.getChild(2)); + List indexedCols = getColumnNames((ASTNode) ast.getChild(3)); + + IndexType indexType = HiveIndex.getIndexType(typeName); + if (indexType != null) { + typeName = indexType.getHandlerClsName(); + } else { + try { + Class.forName(typeName); + } catch (Exception e) { + throw new SemanticException( + "class name provided for index handler not found.", e); + } + } + + String indexTableName = null; + boolean deferredRebuild = false; + String location = null; + Map tblProps = null; + Map idxProps = null; + String indexComment = null; + + RowFormatParams rowFormatParams = new RowFormatParams(); + StorageFormat storageFormat = new StorageFormat(conf); + + for (int idx = 4; idx < ast.getChildCount(); idx++) { + ASTNode child = (ASTNode) ast.getChild(idx); + if (storageFormat.fillStorageFormat(child)) { + continue; + } + switch (child.getToken().getType()) { + case HiveParser.TOK_TABLEROWFORMAT: + rowFormatParams.analyzeRowFormat(child); + break; + case HiveParser.TOK_CREATEINDEX_INDEXTBLNAME: + ASTNode ch = (ASTNode) child.getChild(0); + indexTableName = getUnescapedName(ch); + break; + case HiveParser.TOK_DEFERRED_REBUILDINDEX: + deferredRebuild = true; + break; + case HiveParser.TOK_TABLELOCATION: + location = unescapeSQLString(child.getChild(0).getText()); + addLocationToOutputs(location); + break; + case HiveParser.TOK_TABLEPROPERTIES: + tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child + .getChild(0)); + break; + case HiveParser.TOK_INDEXPROPERTIES: + idxProps = DDLSemanticAnalyzer.getProps((ASTNode) child + .getChild(0)); + break; + case HiveParser.TOK_TABLESERIALIZER: + child = (ASTNode) child.getChild(0); + storageFormat.setSerde(unescapeSQLString(child.getChild(0) + .getText())); + if (child.getChildCount() == 2) { + readProps((ASTNode) (child.getChild(1).getChild(0)), + storageFormat.getSerdeProps()); + } + break; + case HiveParser.TOK_INDEXCOMMENT: + child = (ASTNode) child.getChild(0); + indexComment = unescapeSQLString(child.getText()); + } + } + + storageFormat.fillDefaultStorageFormat(); + + CreateIndexDesc crtIndexDesc = new 
CreateIndexDesc(tableName, + indexName, indexedCols, indexTableName, deferredRebuild, + storageFormat.getInputFormat(), + storageFormat.getOutputFormat(), + storageFormat.getStorageHandler(), typeName, location, + idxProps, tblProps, storageFormat.getSerde(), + storageFormat.getSerdeProps(), rowFormatParams.collItemDelim, + rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, + rowFormatParams.lineDelim, rowFormatParams.mapKeyDelim, + indexComment); + Task createIndex = TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), crtIndexDesc), conf); + rootTasks.add(createIndex); + } + + private void analyzeDropIndex(ASTNode ast) throws SemanticException { + String indexName = unescapeIdentifier(ast.getChild(0).getText()); + String tableName = getUnescapedName((ASTNode) ast.getChild(1)); + boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); + // we want to signal an error if the index doesn't exist and we're + // configured not to ignore this + boolean throwException = !ifExists + && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); + if (throwException) { + try { + Index idx = db.getIndex(tableName, indexName); + } catch (HiveException e) { + throw new SemanticException( + ErrorMsg.INVALID_INDEX.getMsg(indexName)); + } + } + + DropIndexDesc dropIdxDesc = new DropIndexDesc(indexName, tableName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + dropIdxDesc), conf)); + } + + private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { + String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); + String indexName = unescapeIdentifier(ast.getChild(1).getText()); + HashMap partSpec = null; + Tree part = ast.getChild(2); + if (part != null) { + partSpec = extractPartitionSpecs(part); + } + List> indexBuilder = getIndexBuilderMapRed(baseTableName, + indexName, partSpec); + rootTasks.addAll(indexBuilder); + + // Handle updating index timestamps + AlterIndexDesc alterIdxDesc = new AlterIndexDesc( + AlterIndexTypes.UPDATETIMESTAMP); + alterIdxDesc.setIndexName(indexName); + alterIdxDesc.setBaseTableName(baseTableName); + alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); + alterIdxDesc.setSpec(partSpec); + + Task tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf); + for (Task t : indexBuilder) { + t.addDependentTask(tsTask); + } + } + + private void analyzeAlterIndexProps(ASTNode ast) throws SemanticException { + + String baseTableName = getUnescapedName((ASTNode) ast.getChild(0)); + String indexName = unescapeIdentifier(ast.getChild(1).getText()); + HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) + .getChild(0)); + + AlterIndexDesc alterIdxDesc = new AlterIndexDesc( + AlterIndexTypes.ADDPROPS); + alterIdxDesc.setProps(mapProp); + alterIdxDesc.setIndexName(indexName); + alterIdxDesc.setBaseTableName(baseTableName); + alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); + + rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf)); + } + + private List> getIndexBuilderMapRed(String baseTableName, + String indexName, HashMap partSpec) + throws SemanticException { + try { + String dbName = SessionState.get().getCurrentDatabase(); + Index index = db.getIndex(dbName, baseTableName, indexName); + Table indexTbl = getTable(index.getIndexTableName()); + String baseTblName = index.getOrigTableName(); + Table baseTbl = getTable(baseTblName); + + String handlerCls = index.getIndexHandlerClass(); + HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, + handlerCls); + 
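// ---------------------------------------------------------------------------
// Editor's note (not part of this patch): analyzeAlterIndexRebuild above wires
// its work as a small DAG: the handler-generated index-builder tasks become
// root tasks, and a single timestamp-update DDL task is registered as a
// dependent of every builder via addDependentTask, so it runs only after all
// of them finish. The truncate and merge analyzers chain DDL -> move -> stats
// tasks the same way. ToyTask below is a hypothetical stand-in for Hive's
// Task class, used only to make the chaining explicit.
// ---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Tiny stand-in for a task: a name plus the tasks that depend on it.
class ToyTask {
  final String name;
  final List<ToyTask> dependents = new ArrayList<>();
  ToyTask(String name) { this.name = name; }
  void addDependentTask(ToyTask t) { dependents.add(t); }
}

public class TaskChainSketch {
  public static void main(String[] args) {
    List<ToyTask> indexBuilders =
        Arrays.asList(new ToyTask("build idx part=1"), new ToyTask("build idx part=2"));
    ToyTask updateTimestamp = new ToyTask("update index timestamp");

    // Mirror of analyzeAlterIndexRebuild: the timestamp task is a dependent
    // of every builder task.
    for (ToyTask builder : indexBuilders) {
      builder.addDependentTask(updateTimestamp);
    }

    for (ToyTask builder : indexBuilders) {
      System.out.println(builder.name + " -> " + builder.dependents.get(0).name);
    }
  }
}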
+ List indexTblPartitions = null; + List baseTblPartitions = null; + if (indexTbl != null) { + indexTblPartitions = new ArrayList(); + baseTblPartitions = preparePartitions(baseTbl, partSpec, + indexTbl, db, indexTblPartitions); + } + + List> ret = handler.generateIndexBuildTaskList(baseTbl, + index, indexTblPartitions, baseTblPartitions, indexTbl, + getInputs(), getOutputs()); + return ret; + } catch (Exception e) { + throw new SemanticException(e); + } + } + + private List preparePartitions( + org.apache.hadoop.hive.ql.metadata.Table baseTbl, + HashMap partSpec, + org.apache.hadoop.hive.ql.metadata.Table indexTbl, Hive db, + List indexTblPartitions) throws HiveException, + MetaException { + List baseTblPartitions = new ArrayList(); + if (partSpec != null) { + // if partspec is specified, then only producing index for that + // partition + Partition part = db.getPartition(baseTbl, partSpec, false); + if (part == null) { + throw new HiveException("Partition " + + Warehouse.makePartName(partSpec, false) + + " does not exist in table " + baseTbl.getTableName()); + } + baseTblPartitions.add(part); + Partition indexPart = db.getPartition(indexTbl, partSpec, false); + if (indexPart == null) { + indexPart = db.createPartition(indexTbl, partSpec); + } + indexTblPartitions.add(indexPart); + } else if (baseTbl.isPartitioned()) { + // if no partition is specified, create indexes for all partitions + // one + // by one. + baseTblPartitions = db.getPartitions(baseTbl); + for (Partition basePart : baseTblPartitions) { + HashMap pSpec = basePart.getSpec(); + Partition indexPart = db.getPartition(indexTbl, pSpec, false); + if (indexPart == null) { + indexPart = db.createPartition(indexTbl, pSpec); + } + indexTblPartitions.add(indexPart); + } + } + return baseTblPartitions; + } + + private void validateAlterTableType(Table tbl, AlterTableTypes op) + throws SemanticException { + validateAlterTableType(tbl, op, false); + } + + private void validateAlterTableType(Table tbl, AlterTableTypes op, + boolean expectView) throws SemanticException { + if (tbl.isView()) { + if (!expectView) { + throw new SemanticException( + ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg()); + } + + switch (op) { + case ADDPARTITION: + case DROPPARTITION: + case RENAMEPARTITION: + case ADDPROPS: + case DROPPROPS: + case RENAME: + // allow this form + break; + default: + throw new SemanticException( + ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString())); + } + } else { + if (expectView) { + throw new SemanticException( + ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg()); + } + } + if (tbl.isNonNative()) { + throw new SemanticException( + ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName())); + } + } + + private void analyzeAlterTableProps(ASTNode ast, boolean expectView, + boolean isUnset) throws SemanticException { + + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) + .getChild(0)); + AlterTableDesc alterTblDesc = null; + if (isUnset == true) { + alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, + expectView); + if (ast.getChild(2) != null) { + alterTblDesc.setDropIfExists(true); + } + } else { + alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, + expectView); + } + alterTblDesc.setProps(mapProp); + alterTblDesc.setOldName(tableName); + + addInputsOutputsAlterTable(tableName, null, alterTblDesc); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void 
analyzeAlterTableSerdeProps(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) + .getChild(0)); + AlterTableDesc alterTblDesc = new AlterTableDesc( + AlterTableTypes.ADDSERDEPROPS); + alterTblDesc.setProps(mapProp); + alterTblDesc.setOldName(tableName); + alterTblDesc.setPartSpec(partSpec); + + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableSerde(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + String serdeName = unescapeSQLString(ast.getChild(0).getText()); + AlterTableDesc alterTblDesc = new AlterTableDesc( + AlterTableTypes.ADDSERDE); + if (ast.getChildCount() > 1) { + HashMap mapProp = getProps((ASTNode) (ast + .getChild(1)).getChild(0)); + alterTblDesc.setProps(mapProp); + } + alterTblDesc.setOldName(tableName); + alterTblDesc.setSerdeName(serdeName); + alterTblDesc.setPartSpec(partSpec); + + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + StorageFormat format = new StorageFormat(conf); + ASTNode child = (ASTNode) ast.getChild(0); + + if (!format.fillStorageFormat(child)) { + throw new AssertionError("Unknown token " + child.getText()); + } + + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, + format.getInputFormat(), format.getOutputFormat(), + format.getSerde(), format.getStorageHandler(), partSpec); + + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void addInputsOutputsAlterTable(String tableName, + Map partSpec) throws SemanticException { + addInputsOutputsAlterTable(tableName, partSpec, null); + } + + private void addInputsOutputsAlterTable(String tableName, + Map partSpec, AlterTableDesc desc) + throws SemanticException { + Table tab = getTable(tableName, true); + // Determine the lock type to acquire + WriteEntity.WriteType writeType = desc == null ? WriteEntity.WriteType.DDL_EXCLUSIVE + : WriteEntity.determineAlterTableWriteType(desc.getOp()); + if (partSpec == null || partSpec.isEmpty()) { + inputs.add(new ReadEntity(tab)); + outputs.add(new WriteEntity(tab, writeType)); + } else { + ReadEntity re = new ReadEntity(tab); + // In the case of altering a table for its partitions we don't need + // to lock the table + // itself, just the partitions. But the table will have a + // ReadEntity. So mark that + // ReadEntity as no lock. 
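// ---------------------------------------------------------------------------
// Editor's note (not part of this patch): validateAlterTableType, earlier in
// this hunk, only lets a fixed set of ALTER operations run against a view
// (add/drop/rename partition, add/drop properties, rename) and rejects every
// ALTER on a non-native table. The sketch below restates that whitelist with
// an EnumSet; AlterOp is a simplified stand-in for Hive's
// AlterTableDesc.AlterTableTypes and lists only a few values.
// ---------------------------------------------------------------------------
import java.util.EnumSet;

public class ViewAlterWhitelistSketch {

  // Stand-in enum; only the values needed for the example.
  enum AlterOp { ADDPARTITION, DROPPARTITION, RENAMEPARTITION, ADDPROPS, DROPPROPS, RENAME, ADDCOLS }

  private static final EnumSet<AlterOp> ALLOWED_ON_VIEWS = EnumSet.of(
      AlterOp.ADDPARTITION, AlterOp.DROPPARTITION, AlterOp.RENAMEPARTITION,
      AlterOp.ADDPROPS, AlterOp.DROPPROPS, AlterOp.RENAME);

  static boolean allowedOnView(AlterOp op) {
    return ALLOWED_ON_VIEWS.contains(op);
  }

  public static void main(String[] args) {
    System.out.println(allowedOnView(AlterOp.ADDPARTITION)); // true
    System.out.println(allowedOnView(AlterOp.ADDCOLS));      // false
  }
}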
+ re.noLockNeeded(); + inputs.add(re); + if (desc == null + || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { + Partition part = getPartition(tab, partSpec, true); + outputs.add(new WriteEntity(part, writeType)); + } else { + for (Partition part : getPartitions(tab, partSpec, true)) { + outputs.add(new WriteEntity(part, writeType)); + } + } + } + + if (desc != null) { + validateAlterTableType(tab, desc.getOp(), desc.getExpectView()); + + // validate Unset Non Existed Table Properties + if (desc.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS + && desc.getIsDropIfExists() == false) { + Iterator keyItr = desc.getProps().keySet().iterator(); + while (keyItr.hasNext()) { + String currKey = keyItr.next(); + if (tab.getTTable().getParameters().containsKey(currKey) == false) { + String errorMsg = "The following property " + currKey + + " does not exist in " + tab.getTableName(); + throw new SemanticException( + ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY + .getMsg(errorMsg)); + } + } + } + } + } + + private void analyzeAlterTableLocation(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + String newLocation = unescapeSQLString(ast.getChild(0).getText()); + addLocationToOutputs(newLocation); + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, + newLocation, partSpec); + + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + + } + + private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + AlterTableDesc alterTblDesc = new AlterTableDesc( + AlterTableTypes.ALTERPROTECTMODE); + + alterTblDesc.setOldName(tableName); + alterTblDesc.setPartSpec(partSpec); + + ASTNode child = (ASTNode) ast.getChild(0); + + switch (child.getToken().getType()) { + case HiveParser.TOK_ENABLE: + alterTblDesc.setProtectModeEnable(true); + break; + case HiveParser.TOK_DISABLE: + alterTblDesc.setProtectModeEnable(false); + break; + default: + throw new SemanticException( + "Set Protect mode Syntax parsing error."); + } + + ASTNode grandChild = (ASTNode) child.getChild(0); + switch (grandChild.getToken().getType()) { + case HiveParser.TOK_OFFLINE: + alterTblDesc + .setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE); + break; + case HiveParser.TOK_NO_DROP: + if (grandChild.getChildCount() > 0) { + alterTblDesc + .setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE); + } else { + alterTblDesc + .setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP); + } + break; + case HiveParser.TOK_READONLY: + throw new SemanticException( + "Potect mode READONLY is not implemented"); + default: + throw new SemanticException( + "Only protect mode NO_DROP or OFFLINE supported"); + } + + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, + ASTNode ast, String tableName, HashMap partSpec) + throws SemanticException { + AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc( + tableName, partSpec); + + List inputDir = new ArrayList(); + Path oldTblPartLoc = null; + Path newTblPartLoc = null; + Table tblObj = null; + ListBucketingCtx lbCtx = null; + + try { + tblObj = getTable(tableName); + + List bucketCols = null; + Class inputFormatClass = null; + boolean isArchived = false; + boolean 
checkIndex = HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX); + if (checkIndex) { + List indexes = db.getIndexes(tblObj.getDbName(), + tableName, Short.MAX_VALUE); + if (indexes != null && indexes.size() > 0) { + throw new SemanticException( + "can not do merge because source table " + + tableName + " is indexed."); + } + } + + if (tblObj.isPartitioned()) { + if (partSpec == null) { + throw new SemanticException("source table " + tableName + + " is partitioned but no partition desc found."); + } else { + Partition part = getPartition(tblObj, partSpec, false); + if (part == null) { + throw new SemanticException("source table " + tableName + + " is partitioned but partition not found."); + } + bucketCols = part.getBucketCols(); + inputFormatClass = part.getInputFormatClass(); + isArchived = ArchiveUtils.isArchived(part); + + Path tabPath = tblObj.getPath(); + Path partPath = part.getDataLocation(); + + // if the table is in a different dfs than the partition, + // replace the partition's dfs with the table's dfs. + newTblPartLoc = new Path(tabPath.toUri().getScheme(), + tabPath.toUri().getAuthority(), partPath.toUri() + .getPath()); + + oldTblPartLoc = partPath; + + lbCtx = constructListBucketingCtx(part.getSkewedColNames(), + part.getSkewedColValues(), + part.getSkewedColValueLocationMaps(), + part.isStoredAsSubDirectories(), conf); + } + } else { + inputFormatClass = tblObj.getInputFormatClass(); + bucketCols = tblObj.getBucketCols(); + + // input and output are the same + oldTblPartLoc = tblObj.getPath(); + newTblPartLoc = tblObj.getPath(); + + lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), + tblObj.getSkewedColValues(), + tblObj.getSkewedColValueLocationMaps(), + tblObj.isStoredAsSubDirectories(), conf); + } + + // throw a HiveException for non-rcfile. + if (!inputFormatClass.equals(RCFileInputFormat.class)) { + throw new SemanticException( + "Only RCFileFormat is supportted right now."); + } + + // throw a HiveException if the table/partition is bucketized + if (bucketCols != null && bucketCols.size() > 0) { + throw new SemanticException( + "Merge can not perform on bucketized partition/table."); + } + + // throw a HiveException if the table/partition is archived + if (isArchived) { + throw new SemanticException( + "Merge can not perform on archived partitions."); + } + + inputDir.add(oldTblPartLoc); + + mergeDesc.setInputDir(inputDir); + + mergeDesc.setLbCtx(lbCtx); + + addInputsOutputsAlterTable(tableName, partSpec); + DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc); + ddlWork.setNeedLock(true); + Task mergeTask = TaskFactory.get(ddlWork, + conf); + TableDesc tblDesc = Utilities.getTableDesc(tblObj); + Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri()); + mergeDesc.setOutputDir(queryTmpdir); + LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, + partSpec == null ? 
new HashMap() : partSpec); + ltd.setLbCtx(lbCtx); + Task moveTsk = TaskFactory.get(new MoveWork(null, null, + ltd, null, false), conf); + mergeTask.addDependentTask(moveTsk); + + if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsWork statDesc; + if (oldTblPartLoc.equals(newTblPartLoc)) { + // If we're merging to the same location, we can avoid some + // metastore calls + tableSpec tablepart = new tableSpec(this.db, conf, + tablePartAST); + statDesc = new StatsWork(tablepart); + } else { + statDesc = new StatsWork(ltd); + } + statDesc.setNoStatsAggregator(true); + statDesc.setClearAggregatorStats(true); + statDesc.setStatsReliable(conf + .getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); + Task statTask = TaskFactory.get( + statDesc, conf); + moveTsk.addDependentTask(statTask); + } + + rootTasks.add(mergeTask); + } catch (Exception e) { + throw new SemanticException(e); + } + } + + private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + addInputsOutputsAlterTable(tableName, partSpec); + + AlterTableDesc alterTblDesc; + switch (ast.getChild(0).getType()) { + case HiveParser.TOK_NOT_CLUSTERED: + alterTblDesc = new AlterTableDesc(tableName, -1, + new ArrayList(), new ArrayList(), partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), alterTblDesc), conf)); + break; + case HiveParser.TOK_NOT_SORTED: + alterTblDesc = new AlterTableDesc(tableName, true, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), alterTblDesc), conf)); + break; + case HiveParser.TOK_TABLEBUCKETS: + ASTNode buckets = (ASTNode) ast.getChild(0); + List bucketCols = getColumnNames((ASTNode) buckets + .getChild(0)); + List sortCols = new ArrayList(); + int numBuckets = -1; + if (buckets.getChildCount() == 2) { + numBuckets = (Integer.valueOf(buckets.getChild(1).getText())) + .intValue(); + } else { + sortCols = getColumnNamesOrder((ASTNode) buckets.getChild(1)); + numBuckets = (Integer.valueOf(buckets.getChild(2).getText())) + .intValue(); + } + if (numBuckets <= 0) { + throw new SemanticException( + ErrorMsg.INVALID_BUCKET_NUMBER.getMsg()); + } + + alterTblDesc = new AlterTableDesc(tableName, numBuckets, + bucketCols, sortCols, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), alterTblDesc), conf)); + break; + } + } + + private void analyzeAlterTableCompact(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + String type = unescapeSQLString(ast.getChild(0).getText()) + .toLowerCase(); + + if (!type.equals("minor") && !type.equals("major")) { + throw new SemanticException( + ErrorMsg.INVALID_COMPACTION_TYPE.getMsg()); + } + + LinkedHashMap newPartSpec = null; + if (partSpec != null) + newPartSpec = new LinkedHashMap(partSpec); + + AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get() + .getCurrentDatabase(), tableName, newPartSpec, type); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + desc), conf)); + } + + static HashMap getProps(ASTNode prop) { + HashMap mapProp = new HashMap(); + readProps(prop, mapProp); + return mapProp; + } + + /** + * Utility class to resolve QualifiedName + */ + static class QualifiedNameUtil { + + // delimiter to check DOT delimited qualified names + static String delimiter = "\\."; + + /** + * Get the fully qualified name in the ast. e.g. 
the ast of the form + * ^(DOT ^(DOT a b) c) will generate a name of the form a.b.c + * + * @param ast + * The AST from which the qualified name has to be extracted + * @return String + */ + static public String getFullyQualifiedName(ASTNode ast) { + if (ast.getChildCount() == 0) { + return ast.getText(); + } else if (ast.getChildCount() == 2) { + return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." + + getFullyQualifiedName((ASTNode) ast.getChild(1)); + } else if (ast.getChildCount() == 3) { + return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." + + getFullyQualifiedName((ASTNode) ast.getChild(1)) + + "." + + getFullyQualifiedName((ASTNode) ast.getChild(2)); + } else { + return null; + } + } + + // assume the first component of DOT delimited name is tableName + // get the attemptTableName + static public String getAttemptTableName(Hive db, String qualifiedName, + boolean isColumn) { + // check whether the name starts with table + // DESCRIBE table + // DESCRIBE table.column + // DECRIBE table column + String tableName = qualifiedName.substring(0, qualifiedName + .indexOf('.') == -1 ? qualifiedName.length() + : qualifiedName.indexOf('.')); + try { + Table tab = db.getTable(tableName); + if (tab != null) { + if (isColumn) { + // if attempt to get columnPath + // return the whole qualifiedName(table.column or table) + return qualifiedName; + } else { + // if attempt to get tableName + // return table + return tableName; + } + } + } catch (HiveException e) { + // assume the first DOT delimited component is tableName + // OK if it is not + // do nothing when having exception + return null; + } + return null; + } + + // get Database Name + static public String getDBName(Hive db, ASTNode ast) { + String dbName = null; + String fullyQualifiedName = getFullyQualifiedName(ast); + + // if database.table or database.table.column or table.column + // first try the first component of the DOT separated name + if (ast.getChildCount() >= 2) { + dbName = fullyQualifiedName.substring(0, fullyQualifiedName + .indexOf('.') == -1 ? 
fullyQualifiedName.length() + : fullyQualifiedName.indexOf('.')); + try { + // if the database name is not valid + // it is table.column + // return null as dbName + if (!db.databaseExists(dbName)) { + return null; + } + } catch (HiveException e) { + return null; + } + } else { + // in other cases, return null + // database is not validated if null + return null; + } + return dbName; + } + + // get Table Name + static public String getTableName(Hive db, ASTNode ast) + throws SemanticException { + String tableName = null; + String fullyQualifiedName = getFullyQualifiedName(ast); + + // assume the first component of DOT delimited name is tableName + String attemptTableName = getAttemptTableName(db, + fullyQualifiedName, false); + if (attemptTableName != null) { + return attemptTableName; + } + + // if the name does not start with table + // it should start with database + // DESCRIBE database.table + // DESCRIBE database.table column + if (fullyQualifiedName.split(delimiter).length == 3) { + // if DESCRIBE database.table.column + // invalid syntax exception + if (ast.getChildCount() == 2) { + throw new SemanticException( + ErrorMsg.INVALID_TABLE_OR_COLUMN + .getMsg(fullyQualifiedName)); + } else { + // if DESCRIBE database.table column + // return database.table as tableName + tableName = fullyQualifiedName.substring(0, + fullyQualifiedName.lastIndexOf('.')); + } + } else if (fullyQualifiedName.split(delimiter).length == 2) { + // if DESCRIBE database.table + // return database.table as tableName + tableName = fullyQualifiedName; + } else { + // if fullyQualifiedName only have one component + // it is an invalid table + throw new SemanticException( + ErrorMsg.INVALID_TABLE.getMsg(fullyQualifiedName)); + } + + return tableName; + } + + // get column path + static public String getColPath(Hive db, ASTNode parentAst, + ASTNode ast, String tableName, Map partSpec) { + + // if parent has two children + // it could be DESCRIBE table key + // or DESCRIBE table partition + if (parentAst.getChildCount() == 2 && partSpec == null) { + // if partitionSpec is null + // it is DESCRIBE table key + // return table as columnPath + return getFullyQualifiedName(parentAst); + } + + // assume the first component of DOT delimited name is tableName + String attemptTableName = getAttemptTableName(db, tableName, true); + if (attemptTableName != null) { + return attemptTableName; + } + + // if the name does not start with table + // it should start with database + // DESCRIBE database.table + // DESCRIBE database.table column + if (tableName.split(delimiter).length == 3) { + // if DESCRIBE database.table column + // return table.column as column path + return tableName.substring(tableName.indexOf(".") + 1, + tableName.length()); + } + + // in other cases, column path is the same as tableName + return tableName; + } + + // get partition metadata + static public Map getPartitionSpec(Hive db, + ASTNode ast, String tableName) throws SemanticException { + // if ast has two children + // it could be DESCRIBE table key + // or DESCRIBE table partition + // check whether it is DESCRIBE table partition + if (ast.getChildCount() == 2) { + ASTNode partNode = (ASTNode) ast.getChild(1); + HashMap partSpec = null; + try { + partSpec = getPartSpec(partNode); + } catch (SemanticException e) { + // get exception in resolving partition + // it could be DESCRIBE table key + // return null + // continue processing for DESCRIBE table key + return null; + } + + Table tab = null; + try { + tab = db.getTable(tableName); + } catch 
(HiveException e) { + // if table not valid + // throw semantic exception + throw new SemanticException( + ErrorMsg.INVALID_TABLE.getMsg(tableName), e); + } + + if (partSpec != null) { + Partition part = null; + try { + part = db.getPartition(tab, partSpec, false); + } catch (HiveException e) { + // if get exception in finding partition + // it could be DESCRIBE table key + // return null + // continue processing for DESCRIBE table key + return null; + } + + // if partition is not found + // it is DESCRIBE table partition + // invalid partition exception + if (part == null) { + throw new SemanticException( + ErrorMsg.INVALID_PARTITION.getMsg(partSpec + .toString())); + } + + // it is DESCRIBE table partition + // return partition metadata + return partSpec; + } + } + + return null; + } + + } + + /** + * Create a FetchTask for a given thrift ddl schema. + * + * @param schema + * thrift ddl + */ + private FetchTask createFetchTask(String schema) { + Properties prop = new Properties(); + + prop.setProperty(serdeConstants.SERIALIZATION_FORMAT, "9"); + prop.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, " "); + String[] colTypes = schema.split("#"); + prop.setProperty("columns", colTypes[0]); + prop.setProperty("columns.types", colTypes[1]); + prop.setProperty(serdeConstants.SERIALIZATION_LIB, + LazySimpleSerDe.class.getName()); + FetchWork fetch = new FetchWork(ctx.getResFile(), new TableDesc( + TextInputFormat.class, IgnoreKeyTextOutputFormat.class, prop), + -1); + fetch.setSerializationNullFormat(" "); + return (FetchTask) TaskFactory.get(fetch, conf); + } + + private void validateDatabase(String databaseName) throws SemanticException { + try { + if (!db.databaseExists(databaseName)) { + throw new SemanticException( + ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName)); + } + } catch (HiveException e) { + throw new SemanticException( + ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName), e); + } + } + + private void validateTable(String tableName, Map partSpec) + throws SemanticException { + Table tab = getTable(tableName); + if (partSpec != null) { + getPartition(tab, partSpec, true); + } + } + + private void analyzeDescribeTable(ASTNode ast) throws SemanticException { + ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); + + String qualifiedName = QualifiedNameUtil + .getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0)); + String tableName = QualifiedNameUtil.getTableName(db, + (ASTNode) (tableTypeExpr.getChild(0))); + String dbName = QualifiedNameUtil.getDBName(db, + (ASTNode) (tableTypeExpr.getChild(0))); + + Map partSpec = QualifiedNameUtil.getPartitionSpec(db, + tableTypeExpr, tableName); + + String colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, + (ASTNode) tableTypeExpr.getChild(0), qualifiedName, partSpec); + + // if database is not the one currently using + // validate database + if (dbName != null) { + validateDatabase(dbName); + } + if (partSpec != null) { + validateTable(tableName, partSpec); + } + + DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), + tableName, partSpec, colPath); + + boolean showColStats = false; + if (ast.getChildCount() == 2) { + int descOptions = ast.getChild(1).getType(); + descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); + descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED); + descTblDesc.setPretty(descOptions == HiveParser.KW_PRETTY); + // in case of "DESCRIBE FORMATTED tablename column_name" statement, + // colPath + // will contain tablename.column_name. 
If column_name is not + // specified + // colPath will be equal to tableName. This is how we can + // differentiate + // if we are describing a table or column + if (!colPath.equalsIgnoreCase(tableName) + && descTblDesc.isFormatted()) { + showColStats = true; + } + } + + inputs.add(new ReadEntity(getTable(tableName))); + Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + descTblDesc), conf); + rootTasks.add(ddlTask); + String schema = DescTableDesc.getSchema(showColStats); + setFetchTask(createFetchTask(schema)); + LOG.info("analyzeDescribeTable done"); + } + + /** + * Describe database. + * + * @param ast + * @throws SemanticException + */ + private void analyzeDescDatabase(ASTNode ast) throws SemanticException { + + boolean isExtended; + String dbName; + + if (ast.getChildCount() == 1) { + dbName = stripQuotes(ast.getChild(0).getText()); + isExtended = false; + } else if (ast.getChildCount() == 2) { + dbName = stripQuotes(ast.getChild(0).getText()); + isExtended = true; + } else { + throw new SemanticException( + "Unexpected Tokens at DESCRIBE DATABASE"); + } + + DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(), + dbName, isExtended); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + descDbDesc), conf)); + setFetchTask(createFetchTask(descDbDesc.getSchema())); + } + + public static HashMap getPartSpec(ASTNode partspec) + throws SemanticException { + if (partspec == null) { + return null; + } + HashMap partSpec = new LinkedHashMap(); + for (int i = 0; i < partspec.getChildCount(); ++i) { + ASTNode partspec_val = (ASTNode) partspec.getChild(i); + String key = partspec_val.getChild(0).getText(); + String val = null; + if (partspec_val.getChildCount() > 1) { + val = stripQuotes(partspec_val.getChild(1).getText()); + } + partSpec.put(key.toLowerCase(), val); + } + return partSpec; + } + + private void analyzeShowPartitions(ASTNode ast) throws SemanticException { + ShowPartitionsDesc showPartsDesc; + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + List> partSpecs = getPartitionSpecs(ast); + // We only can have a single partition spec + assert (partSpecs.size() <= 1); + Map partSpec = null; + if (partSpecs.size() > 0) { + partSpec = partSpecs.get(0); + } + + validateTable(tableName, null); + + showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), + partSpec); + inputs.add(new ReadEntity(getTable(tableName))); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showPartsDesc), conf)); + setFetchTask(createFetchTask(showPartsDesc.getSchema())); + } + + private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { + ShowCreateTableDesc showCreateTblDesc; + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + showCreateTblDesc = new ShowCreateTableDesc(tableName, ctx.getResFile() + .toString()); + + Table tab = getTable(tableName); + if (tab.getTableType() == org.apache.hadoop.hive.metastore.TableType.INDEX_TABLE) { + throw new SemanticException( + ErrorMsg.SHOW_CREATETABLE_INDEX.getMsg(tableName + + " has table type INDEX_TABLE")); + } + inputs.add(new ReadEntity(tab)); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showCreateTblDesc), conf)); + setFetchTask(createFetchTask(showCreateTblDesc.getSchema())); + } + + private void analyzeShowDatabases(ASTNode ast) throws SemanticException { + ShowDatabasesDesc showDatabasesDesc; + if (ast.getChildCount() == 1) { + String databasePattern = unescapeSQLString(ast.getChild(0) + 
.getText()); + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile(), + databasePattern); + } else { + showDatabasesDesc = new ShowDatabasesDesc(ctx.getResFile()); + } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showDatabasesDesc), conf)); + setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); + } + + private void analyzeShowTables(ASTNode ast) throws SemanticException { + ShowTablesDesc showTblsDesc; + String dbName = SessionState.get().getCurrentDatabase(); + String tableNames = null; + + if (ast.getChildCount() > 3) { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + + switch (ast.getChildCount()) { + case 1: // Uses a pattern + tableNames = unescapeSQLString(ast.getChild(0).getText()); + showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, + tableNames); + break; + case 2: // Specifies a DB + assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(ast.getChild(1).getText()); + validateDatabase(dbName); + showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); + break; + case 3: // Uses a pattern and specifies a DB + assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); + dbName = unescapeIdentifier(ast.getChild(1).getText()); + tableNames = unescapeSQLString(ast.getChild(2).getText()); + validateDatabase(dbName); + showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, + tableNames); + break; + default: // No pattern or DB + showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); + break; + } + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showTblsDesc), conf)); + setFetchTask(createFetchTask(showTblsDesc.getSchema())); + } + + private void analyzeShowColumns(ASTNode ast) throws SemanticException { + ShowColumnsDesc showColumnsDesc; + String dbName = null; + String tableName = null; + switch (ast.getChildCount()) { + case 1: + tableName = getUnescapedName((ASTNode) ast.getChild(0)); + break; + case 2: + dbName = getUnescapedName((ASTNode) ast.getChild(0)); + tableName = getUnescapedName((ASTNode) ast.getChild(1)); + break; + default: + break; + } + + Table tab = getTable(dbName, tableName, true); + inputs.add(new ReadEntity(tab)); + + showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, + tableName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showColumnsDesc), conf)); + setFetchTask(createFetchTask(showColumnsDesc.getSchema())); + } + + private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { + ShowTableStatusDesc showTblStatusDesc; + String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); + String dbName = SessionState.get().getCurrentDatabase(); + int children = ast.getChildCount(); + HashMap partSpec = null; + if (children >= 2) { + if (children > 3) { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + for (int i = 1; i < children; i++) { + ASTNode child = (ASTNode) ast.getChild(i); + if (child.getToken().getType() == HiveParser.Identifier) { + dbName = unescapeIdentifier(child.getText()); + } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) { + partSpec = getPartSpec(child); + } else { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + } + } + + if (partSpec != null) { + validateTable(tableNames, partSpec); + } + + showTblStatusDesc = new ShowTableStatusDesc( + ctx.getResFile().toString(), dbName, tableNames, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showTblStatusDesc), 
conf)); + setFetchTask(createFetchTask(showTblStatusDesc.getSchema())); + } + + private void analyzeShowTableProperties(ASTNode ast) + throws SemanticException { + ShowTblPropertiesDesc showTblPropertiesDesc; + String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); + String dbName = SessionState.get().getCurrentDatabase(); + String propertyName = null; + if (ast.getChildCount() > 1) { + propertyName = unescapeSQLString(ast.getChild(1).getText()); + } + + validateTable(tableNames, null); + + showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile() + .toString(), tableNames, propertyName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showTblPropertiesDesc), conf)); + setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); + } + + private void analyzeShowIndexes(ASTNode ast) throws SemanticException { + ShowIndexesDesc showIndexesDesc; + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + showIndexesDesc = new ShowIndexesDesc(tableName, ctx.getResFile()); + + if (ast.getChildCount() == 2) { + int descOptions = ast.getChild(1).getType(); + showIndexesDesc + .setFormatted(descOptions == HiveParser.KW_FORMATTED); + } + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showIndexesDesc), conf)); + setFetchTask(createFetchTask(showIndexesDesc.getSchema())); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "SHOW FUNCTIONS;". + * + * @param ast + * The parsed command tree. + * @throws SemanticException + * Parsin failed + */ + private void analyzeShowFunctions(ASTNode ast) throws SemanticException { + ShowFunctionsDesc showFuncsDesc; + if (ast.getChildCount() == 1) { + String funcNames = stripQuotes(ast.getChild(0).getText()); + showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile(), funcNames); + } else { + showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile()); + } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showFuncsDesc), conf)); + setFetchTask(createFetchTask(showFuncsDesc.getSchema())); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "SHOW LOCKS;". + * + * @param ast + * The parsed command tree. 
+ * @throws SemanticException + * Parsing failed + */ + private void analyzeShowLocks(ASTNode ast) throws SemanticException { + String tableName = null; + HashMap partSpec = null; + boolean isExtended = false; + + if (ast.getChildCount() >= 1) { + // table for which show locks is being executed + for (int i = 0; i < ast.getChildCount(); i++) { + ASTNode child = (ASTNode) ast.getChild(i); + if (child.getType() == HiveParser.TOK_TABTYPE) { + ASTNode tableTypeExpr = child; + tableName = QualifiedNameUtil + .getFullyQualifiedName((ASTNode) tableTypeExpr + .getChild(0)); + // get partition metadata if partition specified + if (tableTypeExpr.getChildCount() == 2) { + ASTNode partspec = (ASTNode) tableTypeExpr.getChild(1); + partSpec = getPartSpec(partspec); + } + } else if (child.getType() == HiveParser.KW_EXTENDED) { + isExtended = true; + } + } + } + + HiveTxnManager txnManager = null; + try { + txnManager = TxnManagerFactory.getTxnManagerFactory() + .getTxnManager(conf); + } catch (LockException e) { + throw new SemanticException(e.getMessage()); + } + + ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), + tableName, partSpec, isExtended, + txnManager.useNewShowLocksFormat()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showLocksDesc), conf)); + setFetchTask(createFetchTask(showLocksDesc.getSchema())); + + // Need to initialize the lock manager + ctx.setNeedLockMgr(true); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "SHOW LOCKS DATABASE database [extended];". + * + * @param ast + * The parsed command tree. + * @throws SemanticException + * Parsing failed + */ + private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { + boolean isExtended = (ast.getChildCount() > 1); + String dbName = stripQuotes(ast.getChild(0).getText()); + + HiveTxnManager txnManager = null; + try { + txnManager = TxnManagerFactory.getTxnManagerFactory() + .getTxnManager(conf); + } catch (LockException e) { + throw new SemanticException(e.getMessage()); + } + + ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), + dbName, isExtended, txnManager.useNewShowLocksFormat()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showLocksDesc), conf)); + setFetchTask(createFetchTask(showLocksDesc.getSchema())); + + // Need to initialize the lock manager + ctx.setNeedLockMgr(true); + } + + private void analyzeShowConf(ASTNode ast) throws SemanticException { + String confName = stripQuotes(ast.getChild(0).getText()); + ShowConfDesc showConfDesc = new ShowConfDesc(ctx.getResFile(), confName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showConfDesc), conf)); + setFetchTask(createFetchTask(showConfDesc.getSchema())); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "LOCK TABLE ..;". + * + * @param ast + * The parsed command tree. 
+ * @throws SemanticException + * Parsing failed + */ + private void analyzeLockTable(ASTNode ast) throws SemanticException { + String tableName = getUnescapedName((ASTNode) ast.getChild(0)) + .toLowerCase(); + String mode = unescapeIdentifier(ast.getChild(1).getText() + .toUpperCase()); + List> partSpecs = getPartitionSpecs(ast); + + // We only can have a single partition spec + assert (partSpecs.size() <= 1); + Map partSpec = null; + if (partSpecs.size() > 0) { + partSpec = partSpecs.get(0); + } + + LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, + partSpec, HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); + lockTblDesc.setQueryStr(this.ctx.getCmd()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + lockTblDesc), conf)); + + // Need to initialize the lock manager + ctx.setNeedLockMgr(true); + } + + /** + * Add a task to execute "SHOW COMPACTIONS" + * + * @param ast + * The parsed command tree. + * @throws SemanticException + * Parsing failed. + */ + private void analyzeShowCompactions(ASTNode ast) throws SemanticException { + ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + desc), conf)); + setFetchTask(createFetchTask(desc.getSchema())); + } + + /** + * Add a task to execute "SHOW COMPACTIONS" + * + * @param ast + * The parsed command tree. + * @throws SemanticException + * Parsing failed. + */ + private void analyzeShowTxns(ASTNode ast) throws SemanticException { + ShowTxnsDesc desc = new ShowTxnsDesc(ctx.getResFile()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + desc), conf)); + setFetchTask(createFetchTask(desc.getSchema())); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "UNLOCK TABLE ..;". + * + * @param ast + * The parsed command tree. 
+ * @throws SemanticException + * Parsing failed + */ + private void analyzeUnlockTable(ASTNode ast) throws SemanticException { + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + List> partSpecs = getPartitionSpecs(ast); + + // We only can have a single partition spec + assert (partSpecs.size() <= 1); + Map partSpec = null; + if (partSpecs.size() > 0) { + partSpec = partSpecs.get(0); + } + + UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + unlockTblDesc), conf)); + + // Need to initialize the lock manager + ctx.setNeedLockMgr(true); + } + + private void analyzeLockDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + String mode = unescapeIdentifier(ast.getChild(1).getText() + .toUpperCase()); + + // inputs.add(new ReadEntity(dbName)); + // outputs.add(new WriteEntity(dbName)); + LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, + HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); + lockDatabaseDesc.setQueryStr(ctx.getCmd()); + DDLWork work = new DDLWork(getInputs(), getOutputs(), lockDatabaseDesc); + rootTasks.add(TaskFactory.get(work, conf)); + ctx.setNeedLockMgr(true); + } + + private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { + String dbName = unescapeIdentifier(ast.getChild(0).getText()); + + UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName); + DDLWork work = new DDLWork(getInputs(), getOutputs(), + unlockDatabaseDesc); + rootTasks.add(TaskFactory.get(work, conf)); + // Need to initialize the lock manager + ctx.setNeedLockMgr(true); + } + + /** + * Add the task according to the parsed command tree. This is used for the + * CLI command "DESCRIBE FUNCTION;". + * + * @param ast + * The parsed command tree. 
+ * @throws SemanticException + * Parsing failed + */ + private void analyzeDescFunction(ASTNode ast) throws SemanticException { + String funcName; + boolean isExtended; + + if (ast.getChildCount() == 1) { + funcName = stripQuotes(ast.getChild(0).getText()); + isExtended = false; + } else if (ast.getChildCount() == 2) { + funcName = stripQuotes(ast.getChild(0).getText()); + isExtended = true; + } else { + throw new SemanticException( + "Unexpected Tokens at DESCRIBE FUNCTION"); + } + + DescFunctionDesc descFuncDesc = new DescFunctionDesc(ctx.getResFile(), + funcName, isExtended); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + descFuncDesc), conf)); + setFetchTask(createFetchTask(descFuncDesc.getSchema())); + } + + private void analyzeAlterTableRename(ASTNode ast, boolean expectView) + throws SemanticException { + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, + getUnescapedName((ASTNode) ast.getChild(1)), expectView); + + addInputsOutputsAlterTable(tblName, null, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableRenameCol(ASTNode ast) + throws SemanticException { + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + String newComment = null; + String newType = null; + newType = getTypeStringFromAST((ASTNode) ast.getChild(3)); + boolean first = false; + String flagCol = null; + ASTNode positionNode = null; + if (ast.getChildCount() == 6) { + newComment = unescapeSQLString(ast.getChild(4).getText()); + positionNode = (ASTNode) ast.getChild(5); + } else if (ast.getChildCount() == 5) { + if (ast.getChild(4).getType() == HiveParser.StringLiteral) { + newComment = unescapeSQLString(ast.getChild(4).getText()); + } else { + positionNode = (ASTNode) ast.getChild(4); + } + } + + if (positionNode != null) { + if (positionNode.getChildCount() == 0) { + first = true; + } else { + flagCol = unescapeIdentifier(positionNode.getChild(0).getText()); + } + } + + String oldColName = ast.getChild(1).getText(); + String newColName = ast.getChild(2).getText(); + + /* Validate the operation of renaming a column name. 
*/ + Table tab = getTable(tblName); + + SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo(); + if ((null != skewInfo) && (null != skewInfo.getSkewedColNames()) + && skewInfo.getSkewedColNames().contains(oldColName)) { + throw new SemanticException( + oldColName + + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN + .getMsg()); + } + + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, + unescapeIdentifier(oldColName), unescapeIdentifier(newColName), + newType, newComment, first, flagCol); + addInputsOutputsAlterTable(tblName, null, alterTblDesc); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableUpdateStats(ASTNode ast) + throws SemanticException { + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + String colName = getUnescapedName((ASTNode) ast.getChild(1)); + HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) + .getChild(0)); + + Table tbl = null; + try { + tbl = db.getTable(tblName); + } catch (HiveException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + String colType = null; + List cols = tbl.getCols(); + for (FieldSchema col : cols) { + if (colName.equalsIgnoreCase(col.getName())) { + colType = col.getType(); + break; + } + } + + ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tblName, + Arrays.asList(new String[] { colName }), + Arrays.asList(new String[] { colType }), true); + ColumnStatsUpdateTask cStatsUpdateTask = (ColumnStatsUpdateTask) TaskFactory + .get(new ColumnStatsUpdateWork(cStatsDesc, null, mapProp), conf); + rootTasks.add(cStatsUpdateTask); + } + + private void analyzeAlterTableUpdateStats(ASTNode ast,TablePartition tblPart) + throws SemanticException { + String tblName = tblPart.tableName; + HashMap partSpec = tblPart.partSpec; + assert partSpec.size()==1; + String partName = null; + for(Entry entry : partSpec.entrySet()){ + partName = entry.toString(); + } + String colName = getUnescapedName((ASTNode) ast.getChild(0)); + HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) + .getChild(0)); + + Table tbl = null; + try { + tbl = db.getTable(tblName); + } catch (HiveException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + String colType = null; + List cols = tbl.getCols(); + for (FieldSchema col : cols) { + if (colName.equalsIgnoreCase(col.getName())) { + colType = col.getType(); + break; + } + } + + ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tblName, + Arrays.asList(new String[] { colName }), + Arrays.asList(new String[] { colType }), false); + ColumnStatsUpdateTask cStatsUpdateTask = (ColumnStatsUpdateTask) TaskFactory + .get(new ColumnStatsUpdateWork(cStatsDesc, partName, mapProp), conf); + rootTasks.add(cStatsUpdateTask); + } + + private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, + HashMap oldPartSpec) throws SemanticException { + Map newPartSpec = extractPartitionSpecs(ast.getChild(0)); + if (newPartSpec == null) { + throw new SemanticException("RENAME PARTITION Missing Destination" + + ast); + } + Table tab = getTable(tblName, true); + validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION); + ReadEntity re = new ReadEntity(tab); + re.noLockNeeded(); + inputs.add(re); + + List> partSpecs = new ArrayList>(); + partSpecs.add(oldPartSpec); + partSpecs.add(newPartSpec); + addTablePartsOutputs(tblName, partSpecs, + WriteEntity.WriteType.DDL_EXCLUSIVE); + RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( + 
SessionState.get().getCurrentDatabase(), tblName, oldPartSpec, + newPartSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + renamePartitionDesc), conf)); + } + + private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, + HashMap partSpec) throws SemanticException { + Table tab = getTable(tblName, true); + if (tab.getBucketCols() == null || tab.getBucketCols().isEmpty()) { + throw new SemanticException( + ErrorMsg.ALTER_BUCKETNUM_NONBUCKETIZED_TBL.getMsg()); + } + validateAlterTableType(tab, AlterTableTypes.ALTERBUCKETNUM); + inputs.add(new ReadEntity(tab)); + + int bucketNum = Integer.parseInt(ast.getChild(0).getText()); + AlterTableDesc alterBucketNum = new AlterTableDesc(tblName, partSpec, + bucketNum); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterBucketNum), conf)); + } + + private void analyzeAlterTableModifyCols(ASTNode ast, + AlterTableTypes alterType) throws SemanticException { + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + List newCols = getColumns((ASTNode) ast.getChild(1)); + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, + alterType); + + addInputsOutputsAlterTable(tblName, null, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) + throws SemanticException { + + boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) + || HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); + // If the drop has to fail on non-existent partitions, we cannot batch + // expressions. + // That is because we actually have to check each separate expression + // for existence. + // We could do a small optimization for the case where expr has all + // columns and all + // operators are equality, if we assume those would always match one + // partition (which + // may not be true with legacy, non-normalized column values). This is + // probably a + // popular case but that's kinda hacky. Let's not do it for now. + boolean canGroupExprs = ifExists; + + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + Table tab = getTable(tblName, true); + Map> partSpecs = getFullPartitionSpecs( + ast, tab, canGroupExprs); + if (partSpecs.isEmpty()) + return; // nothing to do + + validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView); + ReadEntity re = new ReadEntity(tab); + re.noLockNeeded(); + inputs.add(re); + + boolean ignoreProtection = ast + .getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null; + addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, + ignoreProtection); + + DropTableDesc dropTblDesc = new DropTableDesc(tblName, partSpecs, + expectView, ignoreProtection); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + dropTblDesc), conf)); + } + + private void analyzeAlterTablePartColType(ASTNode ast) + throws SemanticException { + // get table name + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + + Table tab = null; + + // check if table exists. + try { + tab = getTable(tblName, true); + inputs.add(new ReadEntity(tab)); + } catch (HiveException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + + // validate the DDL is a valid operation on the table. + validateAlterTableType(tab, AlterTableTypes.ALTERPARTITION, false); + + // Alter table ... partition column ( column newtype) only takes one + // column at a time. 
+ // It must have a column name followed with type. + ASTNode colAst = (ASTNode) ast.getChild(1); + assert (colAst.getChildCount() == 2); + + FieldSchema newCol = new FieldSchema(); + + // get column name + String name = colAst.getChild(0).getText().toLowerCase(); + newCol.setName(unescapeIdentifier(name)); + + // get column type + ASTNode typeChild = (ASTNode) (colAst.getChild(1)); + newCol.setType(getTypeStringFromAST(typeChild)); + + // check if column is defined or not + boolean fFoundColumn = false; + for (FieldSchema col : tab.getTTable().getPartitionKeys()) { + if (col.getName().compareTo(newCol.getName()) == 0) { + fFoundColumn = true; + } + } + + // raise error if we could not find the column + if (!fFoundColumn) { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol + .getName())); + } + + AlterTableAlterPartDesc alterTblAlterPartDesc = new AlterTableAlterPartDesc( + SessionState.get().getCurrentDatabase(), tblName, newCol); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblAlterPartDesc), conf)); + } + + /** + * Add one or more partitions to a table. Useful when the data has been + * copied to the right location by some other process. + * + * @param ast + * The parsed command tree. + * + * @param expectView + * True for ALTER VIEW, false for ALTER TABLE. + * + * @throws SemanticException + * Parsing failed + */ + private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) + throws SemanticException { + + // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? + // alterStatementSuffixAddPartitionsElement+) + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS; + + Table tab = getTable(tblName, true); + boolean isView = tab.isView(); + validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); + + int numCh = ast.getChildCount(); + int start = ifNotExists ? 2 : 1; + + String currentLocation = null; + Map currentPart = null; + // Parser has done some verification, so the order of tokens doesn't + // need to be verified here. 
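Condensed, the add-partition loop below pairs every TOK_PARTSPEC child with the TOK_PARTITIONLOCATION (if any) that immediately follows it, flushing each pair into AddPartitionDesc. A minimal standalone sketch of that pairing, assuming toy ChildKind/PartChild stand-ins instead of the real ASTNode API:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Toy stand-ins for the parser children; the real code walks ASTNode tokens.
enum ChildKind { PARTSPEC, PARTITIONLOCATION }

class PartChild {
    final ChildKind kind;
    final Map<String, String> spec;   // set when kind == PARTSPEC
    final String location;            // set when kind == PARTITIONLOCATION
    PartChild(ChildKind kind, Map<String, String> spec, String location) {
        this.kind = kind; this.spec = spec; this.location = location;
    }
}

class AddPartitionSketch {
    /** Pair every partition spec with the location token (if any) that follows it. */
    static List<String> group(List<PartChild> children) {
        List<String> partitions = new ArrayList<>();
        Map<String, String> currentPart = null;
        String currentLocation = null;
        for (PartChild child : children) {
            switch (child.kind) {
            case PARTSPEC:
                if (currentPart != null) {          // flush the previous spec
                    partitions.add(currentPart + " @ " + currentLocation);
                    currentLocation = null;
                }
                currentPart = child.spec;
                break;
            case PARTITIONLOCATION:
                currentLocation = child.location;   // attaches to the spec seen last
                break;
            }
        }
        if (currentPart != null) {                  // flush the trailing spec
            partitions.add(currentPart + " @ " + currentLocation);
        }
        return partitions;
    }
}

Feeding children ordered spec, location, spec into group(...) yields one partition with a location and one without, which is the same grouping the analyzer code below produces.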
+ AddPartitionDesc addPartitionDesc = new AddPartitionDesc( + tab.getDbName(), tblName, ifNotExists); + for (int num = start; num < numCh; num++) { + ASTNode child = (ASTNode) ast.getChild(num); + switch (child.getToken().getType()) { + case HiveParser.TOK_PARTSPEC: + if (currentPart != null) { + addPartitionDesc.addPartition(currentPart, currentLocation); + currentLocation = null; + } + currentPart = getPartSpec(child); + validatePartitionValues(currentPart); // validate reserved + // values + validatePartSpec(tab, currentPart, child, conf, true); + break; + case HiveParser.TOK_PARTITIONLOCATION: + // if location specified, set in partition + if (isView) { + throw new SemanticException( + "LOCATION clause illegal for view partition"); + } + currentLocation = unescapeSQLString(child.getChild(0).getText()); + boolean isLocal = false; + try { + // do best effor to determine if this is a local file + String scheme = new URI(currentLocation).getScheme(); + if (scheme != null) { + isLocal = FileUtils.isLocalFile(conf, currentLocation); + } + } catch (URISyntaxException e) { + LOG.warn("Unable to create URI from " + currentLocation, e); + } + inputs.add(new ReadEntity(new Path(currentLocation), isLocal)); + break; + default: + throw new SemanticException("Unknown child: " + child); + } + } + + // add the last one + if (currentPart != null) { + addPartitionDesc.addPartition(currentPart, currentLocation); + } + + if (addPartitionDesc.getPartitionCount() == 0) { + // nothing to do + return; + } + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + addPartitionDesc), conf)); + + if (isView) { + // Compile internal query to capture underlying table partition + // dependencies + StringBuilder cmd = new StringBuilder(); + cmd.append("SELECT * FROM "); + cmd.append(HiveUtils.unparseIdentifier(tblName)); + cmd.append(" WHERE "); + boolean firstOr = true; + for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) { + AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc + .getPartition(i); + if (firstOr) { + firstOr = false; + } else { + cmd.append(" OR "); + } + boolean firstAnd = true; + cmd.append("("); + for (Map.Entry entry : partitionDesc + .getPartSpec().entrySet()) { + if (firstAnd) { + firstAnd = false; + } else { + cmd.append(" AND "); + } + cmd.append(HiveUtils.unparseIdentifier(entry.getKey(), conf)); + cmd.append(" = '"); + cmd.append(HiveUtils.escapeString(entry.getValue())); + cmd.append("'"); + } + cmd.append(")"); + } + Driver driver = new Driver(conf); + int rc = driver.compile(cmd.toString(), false); + if (rc != 0) { + throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg()); + } + inputs.addAll(driver.getPlan().getInputs()); + } + } + + private Partition getPartitionForOutput(Table tab, + Map currentPart) throws SemanticException { + validatePartitionValues(currentPart); + try { + Partition partition = db.getPartition(tab, currentPart, false); + if (partition != null) { + outputs.add(new WriteEntity(partition, + WriteEntity.WriteType.INSERT)); + } + return partition; + } catch (HiveException e) { + LOG.warn("wrong partition spec " + currentPart); + } + return null; + } + + /** + * Rewrite the metadata for one or more partitions in a table. Useful when + * an external process modifies files on HDFS and you want the pre/post + * hooks to be fired for the specified partition. + * + * @param ast + * The parsed command tree. 
+ * @throws SemanticException + * Parsin failed + */ + private void analyzeAlterTableTouch(CommonTree ast) + throws SemanticException { + + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + Table tab = getTable(tblName, true); + validateAlterTableType(tab, AlterTableTypes.TOUCH); + inputs.add(new ReadEntity(tab)); + + // partition name to value + List> partSpecs = getPartitionSpecs(ast); + + if (partSpecs.size() == 0) { + AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( + SessionState.get().getCurrentDatabase(), tblName, null, + AlterTableDesc.AlterTableTypes.TOUCH); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), touchDesc), conf)); + } else { + addTablePartsOutputs(tblName, partSpecs, + WriteEntity.WriteType.DDL_NO_LOCK); + for (Map partSpec : partSpecs) { + AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( + SessionState.get().getCurrentDatabase(), tblName, + partSpec, AlterTableDesc.AlterTableTypes.TOUCH); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), touchDesc), conf)); + } + } + } + + private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) + throws SemanticException { + + if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { + throw new SemanticException( + ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); + + } + String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + // partition name to value + List> partSpecs = getPartitionSpecs(ast); + + Table tab = getTable(tblName, true); + addTablePartsOutputs(tblName, partSpecs, true, + WriteEntity.WriteType.DDL_NO_LOCK); + validateAlterTableType(tab, AlterTableTypes.ARCHIVE); + inputs.add(new ReadEntity(tab)); + + if (partSpecs.size() > 1) { + throw new SemanticException( + isUnArchive ? ErrorMsg.UNARCHIVE_ON_MULI_PARTS.getMsg() + : ErrorMsg.ARCHIVE_ON_MULI_PARTS.getMsg()); + } + if (partSpecs.size() == 0) { + throw new SemanticException(ErrorMsg.ARCHIVE_ON_TABLE.getMsg()); + } + + Map partSpec = partSpecs.get(0); + try { + isValidPrefixSpec(tab, partSpec); + } catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); + } + AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( + SessionState.get().getCurrentDatabase(), tblName, partSpec, + (isUnArchive ? AlterTableTypes.UNARCHIVE + : AlterTableTypes.ARCHIVE)); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + archiveDesc), conf)); + + } + + /** + * Verify that the information in the metastore matches up with the data on + * the fs. + * + * @param ast + * Query tree. + * @throws SemanticException + */ + private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { + String tableName = null; + boolean repair = false; + if (ast.getChildCount() > 0) { + repair = ast.getChild(0).getType() == HiveParser.KW_REPAIR; + if (!repair) { + tableName = getUnescapedName((ASTNode) ast.getChild(0)); + } else if (ast.getChildCount() > 1) { + tableName = getUnescapedName((ASTNode) ast.getChild(1)); + } + } + List> specs = getPartitionSpecs(ast); + MsckDesc checkDesc = new MsckDesc(tableName, specs, ctx.getResFile(), + repair); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + checkDesc), conf)); + } + + /** + * Get the partition specs from the tree. + * + * @param ast + * Tree to extract partitions from. + * @return A list of partition name to value mappings. 
+ * @throws SemanticException + */ + private List> getPartitionSpecs(CommonTree ast) + throws SemanticException { + List> partSpecs = new ArrayList>(); + int childIndex = 0; + // get partition metadata if partition specified + for (childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { + Tree partspec = ast.getChild(childIndex); + // sanity check + if (partspec.getType() == HiveParser.TOK_PARTSPEC) { + partSpecs.add(getPartSpec((ASTNode) partspec)); + } + } + return partSpecs; + } + + /** + * Get the partition specs from the tree. This stores the full specification + * with the comparator operator into the output list. + * + * @param ast + * Tree to extract partitions from. + * @param tab + * Table. + * @param result + * Map of partitions by prefix length. Most of the time prefix + * length will be the same for all partition specs, so we can + * just OR the expressions. + */ + private Map> getFullPartitionSpecs( + CommonTree ast, Table tab, boolean canGroupExprs) + throws SemanticException { + Map colTypes = new HashMap(); + for (FieldSchema fs : tab.getPartitionKeys()) { + colTypes.put(fs.getName().toLowerCase(), fs.getType()); + } + + Map> result = new HashMap>(); + for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { + Tree partSpecTree = ast.getChild(childIndex); + if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) + continue; + ExprNodeGenericFuncDesc expr = null; + HashSet names = new HashSet( + partSpecTree.getChildCount()); + for (int i = 0; i < partSpecTree.getChildCount(); ++i) { + CommonTree partSpecSingleKey = (CommonTree) partSpecTree + .getChild(i); + assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL); + String key = partSpecSingleKey.getChild(0).getText() + .toLowerCase(); + String operator = partSpecSingleKey.getChild(1).getText(); + String val = stripQuotes(partSpecSingleKey.getChild(2) + .getText()); + + String type = colTypes.get(key); + if (type == null) { + throw new SemanticException("Column " + key + " not found"); + } + // Create the corresponding hive expression to filter on + // partition columns. + PrimitiveTypeInfo pti = TypeInfoFactory + .getPrimitiveTypeInfo(type); + Converter converter = ObjectInspectorConverters + .getConverter( + TypeInfoUtils + .getStandardJavaObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo), + TypeInfoUtils + .getStandardJavaObjectInspectorFromTypeInfo(pti)); + ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, + null, true); + ExprNodeGenericFuncDesc op = makeBinaryPredicate(operator, + column, + new ExprNodeConstantDesc(pti, converter.convert(val))); + // If it's multi-expr filter (e.g. a='5', b='2012-01-02'), AND + // with previous exprs. + expr = (expr == null) ? op : makeBinaryPredicate("and", expr, + op); + names.add(key); + } + if (expr == null) + continue; + // We got the expr for one full partition spec. Determine the prefix + // length. + int prefixLength = calculatePartPrefix(tab, names); + List orExpr = result.get(prefixLength); + // We have to tell apart partitions resulting from spec with + // different prefix lengths. + // So, if we already have smth for the same prefix length, we can OR + // the two. + // If we don't, create a new separate filter. In most cases there + // will only be one. 
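In outline: each partition spec in the drop is folded into one conjunction of key comparisons, and specs that constrain the same number of leading partition keys may be OR-ed together when missing partitions are allowed to be skipped. A simplified sketch of that grouping, assuming plain strings in place of ExprNodeGenericFuncDesc and a hypothetical DropSpecGrouping helper:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class DropSpecGrouping {
    /** AND the key comparisons of one spec, then OR specs that share a prefix length. */
    static Map<Integer, String> group(List<LinkedHashMap<String, String>> specs,
                                      List<String> partKeys, boolean canGroupExprs) {
        Map<Integer, String> byPrefix = new HashMap<>();
        for (LinkedHashMap<String, String> spec : specs) {
            // Build the conjunction for this spec, e.g. (a = '5') and (b = '2012-01-02').
            String expr = null;
            for (Map.Entry<String, String> e : spec.entrySet()) {
                String term = "(" + e.getKey() + " = '" + e.getValue() + "')";
                expr = (expr == null) ? term : "(" + expr + " and " + term + ")";
            }
            if (expr == null) {
                continue;
            }
            // Prefix length = number of leading partition keys present in the spec.
            int prefix = 0;
            for (String key : partKeys) {
                if (!spec.containsKey(key)) {
                    break;
                }
                prefix++;
            }
            String existing = byPrefix.get(prefix);
            if (existing == null) {
                byPrefix.put(prefix, expr);
            } else if (canGroupExprs) {
                byPrefix.put(prefix, "(" + existing + " or " + expr + ")");
            }
            // The real code keeps a list per prefix length when the exprs cannot be grouped.
        }
        return byPrefix;
    }
}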
+ if (orExpr == null) { + result.put(prefixLength, Lists.newArrayList(expr)); + } else if (canGroupExprs) { + orExpr.set(0, makeBinaryPredicate("or", expr, orExpr.get(0))); + } else { + orExpr.add(expr); + } + } + return result; + } + + private static ExprNodeGenericFuncDesc makeBinaryPredicate(String fn, + ExprNodeDesc left, ExprNodeDesc right) { + return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, + FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), + Lists.newArrayList(left, right)); + } + + /** + * Calculates the partition prefix length based on the drop spec. This is + * used to avoid deleting archived partitions with lower level. For example, + * if, for A and B key cols, drop spec is A=5, B=6, we shouldn't drop + * archived A=5/, because it can contain B-s other than 6. + * + * @param tbl + * Table + * @param partSpecKeys + * Keys present in drop partition spec. + */ + private int calculatePartPrefix(Table tbl, HashSet partSpecKeys) { + int partPrefixToDrop = 0; + for (FieldSchema fs : tbl.getPartCols()) { + if (!partSpecKeys.contains(fs.getName())) + break; + ++partPrefixToDrop; + } + return partPrefixToDrop; + } + + /** + * Certain partition values are are used by hive. e.g. the default partition + * in dynamic partitioning and the intermediate partition values used in the + * archiving process. Naturally, prohibit the user from creating partitions + * with these reserved values. The check that this function is more + * restrictive than the actual limitation, but it's simpler. Should be okay + * since the reserved names are fairly long and uncommon. + */ + private void validatePartitionValues(Map partSpec) + throws SemanticException { + + for (Entry e : partSpec.entrySet()) { + for (String s : reservedPartitionValues) { + if (e.getValue().contains(s)) { + throw new SemanticException( + ErrorMsg.RESERVED_PART_VAL.getMsg("(User value: " + + e.getValue() + " Reserved substring: " + + s + ")")); + } + } + } + } + + /** + * Add the table partitions to be modified in the output, so that it is + * available for the pre-execution hook. If the partition does not exist, no + * error is thrown. + */ + private void addTablePartsOutputs(String tblName, + List> partSpecs, WriteEntity.WriteType writeType) + throws SemanticException { + addTablePartsOutputs(tblName, partSpecs, false, false, null, writeType); + } + + /** + * Add the table partitions to be modified in the output, so that it is + * available for the pre-execution hook. If the partition does not exist, no + * error is thrown. + */ + private void addTablePartsOutputs(String tblName, + List> partSpecs, boolean allowMany, + WriteEntity.WriteType writeType) throws SemanticException { + addTablePartsOutputs(tblName, partSpecs, false, allowMany, null, + writeType); + } + + /** + * Add the table partitions to be modified in the output, so that it is + * available for the pre-execution hook. If the partition does not exist, + * throw an error if throwIfNonExistent is true, otherwise ignore it. 
+ */ + private void addTablePartsOutputs(String tblName, + List> partSpecs, boolean throwIfNonExistent, + boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType) + throws SemanticException { + Table tab = getTable(tblName); + + Iterator> i; + int index; + for (i = partSpecs.iterator(), index = 1; i.hasNext(); ++index) { + Map partSpec = i.next(); + List parts = null; + if (allowMany) { + try { + parts = db.getPartitions(tab, partSpec); + } catch (HiveException e) { + LOG.error("Got HiveException during obtaining list of partitions" + + StringUtils.stringifyException(e)); + throw new SemanticException(e.getMessage(), e); + } + } else { + parts = new ArrayList(); + try { + Partition p = db.getPartition(tab, partSpec, false); + if (p != null) { + parts.add(p); + } + } catch (HiveException e) { + LOG.debug("Wrong specification" + + StringUtils.stringifyException(e)); + throw new SemanticException(e.getMessage(), e); + } + } + if (parts.isEmpty()) { + if (throwIfNonExistent) { + throw new SemanticException( + ErrorMsg.INVALID_PARTITION.getMsg(ast + .getChild(index))); + } + } + for (Partition p : parts) { + // Don't request any locks here, as the table has already been + // locked. + outputs.add(new WriteEntity(p, writeType)); + } + } + } + + /** + * Add the table partitions to be modified in the output, so that it is + * available for the pre-execution hook. If the partition does not exist, + * throw an error if throwIfNonExistent is true, otherwise ignore it. + */ + private void addTableDropPartsOutputs(Table tab, + Collection> partSpecs, + boolean throwIfNonExistent, boolean ignoreProtection) + throws SemanticException { + + for (List specs : partSpecs) { + for (ExprNodeGenericFuncDesc partSpec : specs) { + List parts = new ArrayList(); + boolean hasUnknown = false; + try { + hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, + parts); + } catch (Exception e) { + throw new SemanticException( + ErrorMsg.INVALID_PARTITION.getMsg(partSpec + .getExprString()), e); + } + if (hasUnknown) { + throw new SemanticException( + "Unexpected unknown partitions for " + + partSpec.getExprString()); + } + + // TODO: ifExists could be moved to metastore. In fact it + // already supports that. Check it + // for now since we get parts for output anyway, so we can get + // the error message + // earlier... If we get rid of output, we can get rid of this. + if (parts.isEmpty()) { + if (throwIfNonExistent) { + throw new SemanticException( + ErrorMsg.INVALID_PARTITION.getMsg(partSpec + .getExprString())); + } + } + for (Partition p : parts) { + // TODO: same thing, metastore already checks this but check + // here if we can. + if (!ignoreProtection && !p.canDrop()) { + throw new SemanticException( + ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION + .getMsg(p.getCompleteName())); + } + outputs.add(new WriteEntity(p, + WriteEntity.WriteType.DDL_EXCLUSIVE)); + } + } + } + } + + /** + * Analyze alter table's skewed table + * + * @param ast + * node + * @throws SemanticException + */ + private void analyzeAltertableSkewedby(ASTNode ast) + throws SemanticException { + /** + * Throw an error if the user tries to use the DDL with + * hive.internal.ddl.list.bucketing.enable set to false. 
+ */ + HiveConf hiveConf = SessionState.get().getConf(); + + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + Table tab = getTable(tableName, true); + + inputs.add(new ReadEntity(tab)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); + + validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); + + if (ast.getChildCount() == 1) { + /* Convert a skewed table to non-skewed table. */ + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, + new ArrayList(), new ArrayList>()); + alterTblDesc.setStoredAsSubDirectories(false); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), alterTblDesc), conf)); + } else { + switch (((ASTNode) ast.getChild(1)).getToken().getType()) { + case HiveParser.TOK_TABLESKEWED: + handleAlterTableSkewedBy(ast, tableName, tab); + break; + case HiveParser.TOK_STOREDASDIRS: + handleAlterTableDisableStoredAsDirs(tableName, tab); + break; + default: + assert false; + } + } + } + + /** + * Handle alter table not stored as directories + * + * @param tableName + * @param tab + * @throws SemanticException + */ + private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) + throws SemanticException { + List skewedColNames = tab.getSkewedColNames(); + List> skewedColValues = tab.getSkewedColValues(); + if ((skewedColNames == null) || (skewedColNames.size() == 0) + || (skewedColValues == null) || (skewedColValues.size() == 0)) { + throw new SemanticException( + ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName)); + } + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, + skewedColNames, skewedColValues); + alterTblDesc.setStoredAsSubDirectories(false); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + /** + * Process "alter table skewed by .. on .. stored as directories + * + * @param ast + * @param tableName + * @param tab + * @throws SemanticException + */ + private void handleAlterTableSkewedBy(ASTNode ast, String tableName, + Table tab) throws SemanticException { + List skewedColNames = new ArrayList(); + List> skewedValues = new ArrayList>(); + /* skewed column names. */ + ASTNode skewedNode = (ASTNode) ast.getChild(1); + skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, + skewedNode); + /* skewed value. 
*/ + analyzeDDLSkewedValues(skewedValues, skewedNode); + // stored as directories + boolean storedAsDirs = analyzeStoredAdDirs(skewedNode); + + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, + skewedColNames, skewedValues); + alterTblDesc.setStoredAsSubDirectories(storedAsDirs); + /** + * Validate information about skewed table + */ + alterTblDesc.setTable(tab); + alterTblDesc.validate(); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + /** + * Analyze skewed column names + * + * @param skewedColNames + * @param child + * @return + * @throws SemanticException + */ + private List analyzeAlterTableSkewedColNames( + List skewedColNames, ASTNode child) + throws SemanticException { + Tree nNode = child.getChild(0); + if (nNode == null) { + throw new SemanticException( + ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); + } else { + ASTNode nAstNode = (ASTNode) nNode; + if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { + throw new SemanticException( + ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); + } else { + skewedColNames = getColumnNames(nAstNode); + } + } + return skewedColNames; + } + + /** + * Given a ASTNode, return list of values. + * + * use case: create table xyz list bucketed (col1) with skew (1,2,5) AST + * Node is for (1,2,5) + * + * @param ast + * @return + */ + private List getColumnValues(ASTNode ast) { + List colList = new ArrayList(); + int numCh = ast.getChildCount(); + for (int i = 0; i < numCh; i++) { + ASTNode child = (ASTNode) ast.getChild(i); + colList.add(stripQuotes(child.getText()).toLowerCase()); + } + return colList; + } + + /** + * Analyze alter table's skewed location + * + * @param ast + * @param tableName + * @param partSpec + * @throws SemanticException + */ + private void analyzeAlterTableSkewedLocation(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + /** + * Throw an error if the user tries to use the DDL with + * hive.internal.ddl.list.bucketing.enable set to false. 
+ */ + HiveConf hiveConf = SessionState.get().getConf(); + /** + * Retrieve mappings from parser + */ + Map, String> locations = new HashMap, String>(); + ArrayList locNodes = ast.getChildren(); + if (null == locNodes) { + throw new SemanticException( + ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC.getMsg()); + } else { + for (Node locNode : locNodes) { + // TOK_SKEWED_LOCATIONS + ASTNode locAstNode = (ASTNode) locNode; + ArrayList locListNodes = locAstNode.getChildren(); + if (null == locListNodes) { + throw new SemanticException( + ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC.getMsg()); + } else { + for (Node locListNode : locListNodes) { + // TOK_SKEWED_LOCATION_LIST + ASTNode locListAstNode = (ASTNode) locListNode; + ArrayList locMapNodes = locListAstNode + .getChildren(); + if (null == locMapNodes) { + throw new SemanticException( + ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_LOC + .getMsg()); + } else { + for (Node locMapNode : locMapNodes) { + // TOK_SKEWED_LOCATION_MAP + ASTNode locMapAstNode = (ASTNode) locMapNode; + ArrayList locMapAstNodeMaps = locMapAstNode + .getChildren(); + if ((null == locMapAstNodeMaps) + || (locMapAstNodeMaps.size() != 2)) { + throw new SemanticException( + ErrorMsg.ALTER_TBL_SKEWED_LOC_NO_MAP + .getMsg()); + } else { + List keyList = new LinkedList(); + ASTNode node = (ASTNode) locMapAstNodeMaps + .get(0); + if (node.getToken().getType() == HiveParser.TOK_TABCOLVALUES) { + keyList = getSkewedValuesFromASTNode(node); + } else if (isConstant(node)) { + keyList.add(PlanUtils.stripQuotes(node + .getText())); + } else { + throw new SemanticException( + ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE + .getMsg()); + } + String newLocation = PlanUtils + .stripQuotes(unescapeSQLString(((ASTNode) locMapAstNodeMaps + .get(1)).getText())); + validateSkewedLocationString(newLocation); + locations.put(keyList, newLocation); + addLocationToOutputs(newLocation); + } + } + } + } + } + } + } + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, locations, + partSpec); + addInputsOutputsAlterTable(tableName, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void addLocationToOutputs(String newLocation) { + outputs.add(new WriteEntity(new Path(newLocation), FileUtils + .isLocalFile(conf, newLocation))); + } + + /** + * Check if the node is constant. + * + * @param node + * @return + */ + private boolean isConstant(ASTNode node) { + boolean result = false; + switch (node.getToken().getType()) { + case HiveParser.Number: + result = true; + break; + case HiveParser.StringLiteral: + result = true; + break; + case HiveParser.BigintLiteral: + result = true; + break; + case HiveParser.SmallintLiteral: + result = true; + break; + case HiveParser.TinyintLiteral: + result = true; + break; + case HiveParser.DecimalLiteral: + result = true; + break; + case HiveParser.CharSetName: + result = true; + break; + case HiveParser.KW_TRUE: + case HiveParser.KW_FALSE: + result = true; + break; + default: + break; + } + return result; + } + + private void validateSkewedLocationString(String newLocation) + throws SemanticException { + /* Validate location string. */ + try { + URI locUri = new URI(newLocation); + if (!locUri.isAbsolute() || locUri.getScheme() == null + || locUri.getScheme().trim().equals("")) { + throw new SemanticException( + newLocation + + " is not absolute or has no scheme information. 
" + + "Please specify a complete absolute uri with scheme information."); + } + } catch (URISyntaxException e) { + throw new SemanticException(e); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index f5d0602..b1a2b24 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -146,6 +146,7 @@ TOK_ALTERTABLE_ARCHIVE; TOK_ALTERTABLE_UNARCHIVE; TOK_ALTERTABLE_SERDEPROPERTIES; TOK_ALTERTABLE_SERIALIZER; +TOK_ALTERTABLE_UPDATECOLSTATS; TOK_TABLE_PARTITION; TOK_ALTERTABLE_FILEFORMAT; TOK_ALTERTABLE_LOCATION; @@ -937,6 +938,7 @@ alterTableStatementSuffix : alterStatementSuffixRename | alterStatementSuffixAddCol | alterStatementSuffixRenameCol + | alterStatementSuffixUpdateStatsCol | alterStatementSuffixDropPartitions | alterStatementSuffixAddPartitions | alterStatementSuffixTouch @@ -1029,6 +1031,13 @@ alterStatementSuffixRenameCol ->^(TOK_ALTERTABLE_RENAMECOL identifier $oldName $newName colType $comment? alterStatementChangeColPosition?) ; +alterStatementSuffixUpdateStatsCol +@init { pushMsg("update column statistics", state); } +@after { popMsg(state); } + : identifier KW_UPDATE KW_STATISTICS KW_FOR KW_COLUMN? colName=identifier KW_SET tableProperties (KW_COMMENT comment=StringLiteral)? + ->^(TOK_ALTERTABLE_UPDATECOLSTATS identifier $colName tableProperties $comment?) + ; + alterStatementChangeColPosition : first=KW_FIRST|KW_AFTER afterCol=identifier ->{$first != null}? ^(TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION ) @@ -1131,6 +1140,7 @@ alterTblPartitionStatementSuffix | alterStatementSuffixMergeFiles | alterStatementSuffixSerdeProperties | alterStatementSuffixRenamePart + | alterStatementSuffixStatsPart | alterStatementSuffixBucketNum | alterTblPartitionStatementSuffixSkewedLocation | alterStatementSuffixClusterbySortby @@ -1222,6 +1232,13 @@ alterStatementSuffixRenamePart ->^(TOK_ALTERTABLE_RENAMEPART partitionSpec) ; +alterStatementSuffixStatsPart +@init { pushMsg("alter table stats partition statement", state); } +@after { popMsg(state); } + : KW_UPDATE KW_STATISTICS KW_FOR KW_COLUMN? colName=identifier KW_SET tableProperties (KW_COMMENT comment=StringLiteral)? + ->^(TOK_ALTERTABLE_UPDATECOLSTATS $colName tableProperties $comment?) + ; + alterStatementSuffixMergeFiles @init { pushMsg("", state); } @after { popMsg(state); } @@ -1301,6 +1318,7 @@ descStatement | (KW_DESCRIBE|KW_DESC) KW_DATABASE KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?) 
; + analyzeStatement @init { pushMsg("analyze statement", state); } @after { popMsg(state); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index d38270c..5335bca 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -912,6 +912,7 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) boolean phase1Result = true; QBParseInfo qbp = qb.getParseInfo(); + boolean skipRecursion = false; if (ast.getToken() != null) { @@ -8325,6 +8326,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT // Go over all the destination tables for (String dest : commonGroupByDestGroup) { + curr = inputs.get(dest); if (qbp.getWhrForClause(dest) != null) { @@ -9440,7 +9442,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { // such as JDBC would prefer instead of the c0, c1 we'll end // up with later. Operator sinkOp = genPlan(qb); - + if (createVwDesc != null) resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver()); else @@ -9487,7 +9489,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { if (LOG.isDebugEnabled()) { LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values())); } - + Optimizer optm = new Optimizer(); optm.setPctx(pCtx); optm.initialize(conf); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 268920a..3161594 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -30,243 +30,291 @@ */ public final class SemanticAnalyzerFactory { - static HashMap commandType = new HashMap(); - static HashMap tablePartitionCommandType = new HashMap(); + static HashMap commandType = new HashMap(); + static HashMap tablePartitionCommandType = new HashMap(); - static { - commandType.put(HiveParser.TOK_EXPLAIN, HiveOperation.EXPLAIN); - commandType.put(HiveParser.TOK_LOAD, HiveOperation.LOAD); - commandType.put(HiveParser.TOK_EXPORT, HiveOperation.EXPORT); - commandType.put(HiveParser.TOK_IMPORT, HiveOperation.IMPORT); - commandType.put(HiveParser.TOK_CREATEDATABASE, HiveOperation.CREATEDATABASE); - commandType.put(HiveParser.TOK_DROPDATABASE, HiveOperation.DROPDATABASE); - commandType.put(HiveParser.TOK_SWITCHDATABASE, HiveOperation.SWITCHDATABASE); - commandType.put(HiveParser.TOK_CREATETABLE, HiveOperation.CREATETABLE); - commandType.put(HiveParser.TOK_TRUNCATETABLE, HiveOperation.TRUNCATETABLE); - commandType.put(HiveParser.TOK_DROPTABLE, HiveOperation.DROPTABLE); - commandType.put(HiveParser.TOK_DESCTABLE, HiveOperation.DESCTABLE); - commandType.put(HiveParser.TOK_DESCFUNCTION, HiveOperation.DESCFUNCTION); - commandType.put(HiveParser.TOK_MSCK, HiveOperation.MSCK); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, HiveOperation.ALTERTABLE_ADDCOLS); - commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, HiveOperation.ALTERTABLE_REPLACECOLS); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, HiveOperation.ALTERTABLE_RENAMECOL); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, HiveOperation.ALTERTABLE_RENAME); - commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, 
HiveOperation.ALTERTABLE_ADDPARTS); - commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, HiveOperation.ALTERTABLE_TOUCH); - commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, HiveOperation.ALTERTABLE_ARCHIVE); - commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_UNARCHIVE); - commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); - commandType.put(HiveParser.TOK_DROPTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); - commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES); - commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES); - commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS); - commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, HiveOperation.SHOW_TABLESTATUS); - commandType.put(HiveParser.TOK_SHOW_TBLPROPERTIES, HiveOperation.SHOW_TBLPROPERTIES); - commandType.put(HiveParser.TOK_SHOW_CREATETABLE, HiveOperation.SHOW_CREATETABLE); - commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS); - commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES); - commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS); - commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS); - commandType.put(HiveParser.TOK_SHOWDBLOCKS, HiveOperation.SHOWLOCKS); - commandType.put(HiveParser.TOK_SHOWCONF, HiveOperation.SHOWCONF); - commandType.put(HiveParser.TOK_CREATEFUNCTION, HiveOperation.CREATEFUNCTION); - commandType.put(HiveParser.TOK_DROPFUNCTION, HiveOperation.DROPFUNCTION); - commandType.put(HiveParser.TOK_CREATEMACRO, HiveOperation.CREATEMACRO); - commandType.put(HiveParser.TOK_DROPMACRO, HiveOperation.DROPMACRO); - commandType.put(HiveParser.TOK_CREATEVIEW, HiveOperation.CREATEVIEW); - commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW); - commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX); - commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX); - commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); - commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, HiveOperation.ALTERINDEX_PROPS); - commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); - commandType.put(HiveParser.TOK_DROPVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); - commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); - commandType.put(HiveParser.TOK_ALTERVIEW_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); - commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); - commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); - commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); - commandType.put(HiveParser.TOK_LOCKDB, HiveOperation.LOCKDB); - commandType.put(HiveParser.TOK_UNLOCKDB, HiveOperation.UNLOCKDB); - commandType.put(HiveParser.TOK_CREATEROLE, HiveOperation.CREATEROLE); - commandType.put(HiveParser.TOK_DROPROLE, HiveOperation.DROPROLE); - commandType.put(HiveParser.TOK_GRANT, HiveOperation.GRANT_PRIVILEGE); - commandType.put(HiveParser.TOK_REVOKE, HiveOperation.REVOKE_PRIVILEGE); - commandType.put(HiveParser.TOK_SHOW_GRANT, HiveOperation.SHOW_GRANT); - commandType.put(HiveParser.TOK_GRANT_ROLE, HiveOperation.GRANT_ROLE); - commandType.put(HiveParser.TOK_REVOKE_ROLE, HiveOperation.REVOKE_ROLE); - commandType.put(HiveParser.TOK_SHOW_ROLES, HiveOperation.SHOW_ROLES); - commandType.put(HiveParser.TOK_SHOW_SET_ROLE, HiveOperation.SHOW_ROLES); - 
commandType.put(HiveParser.TOK_SHOW_ROLE_PRINCIPALS, HiveOperation.SHOW_ROLE_PRINCIPALS); - commandType.put(HiveParser.TOK_SHOW_ROLE_GRANT, HiveOperation.SHOW_ROLE_GRANT); - commandType.put(HiveParser.TOK_ALTERDATABASE_PROPERTIES, HiveOperation.ALTERDATABASE); - commandType.put(HiveParser.TOK_ALTERDATABASE_OWNER, HiveOperation.ALTERDATABASE_OWNER); - commandType.put(HiveParser.TOK_DESCDATABASE, HiveOperation.DESCDATABASE); - commandType.put(HiveParser.TOK_ALTERTABLE_SKEWED, HiveOperation.ALTERTABLE_SKEWED); - commandType.put(HiveParser.TOK_ANALYZE, HiveOperation.ANALYZE_TABLE); - commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, HiveOperation.ALTERVIEW_RENAME); - commandType.put(HiveParser.TOK_ALTERTABLE_PARTCOLTYPE, HiveOperation.ALTERTABLE_PARTCOLTYPE); - commandType.put(HiveParser.TOK_SHOW_COMPACTIONS, HiveOperation.SHOW_COMPACTIONS); - commandType.put(HiveParser.TOK_SHOW_TRANSACTIONS, HiveOperation.SHOW_TRANSACTIONS); - } + static { + commandType.put(HiveParser.TOK_EXPLAIN, HiveOperation.EXPLAIN); + commandType.put(HiveParser.TOK_LOAD, HiveOperation.LOAD); + commandType.put(HiveParser.TOK_EXPORT, HiveOperation.EXPORT); + commandType.put(HiveParser.TOK_IMPORT, HiveOperation.IMPORT); + commandType.put(HiveParser.TOK_CREATEDATABASE, + HiveOperation.CREATEDATABASE); + commandType + .put(HiveParser.TOK_DROPDATABASE, HiveOperation.DROPDATABASE); + commandType.put(HiveParser.TOK_SWITCHDATABASE, + HiveOperation.SWITCHDATABASE); + commandType.put(HiveParser.TOK_CREATETABLE, HiveOperation.CREATETABLE); + commandType.put(HiveParser.TOK_TRUNCATETABLE, + HiveOperation.TRUNCATETABLE); + commandType.put(HiveParser.TOK_DROPTABLE, HiveOperation.DROPTABLE); + commandType.put(HiveParser.TOK_DESCTABLE, HiveOperation.DESCTABLE); + commandType + .put(HiveParser.TOK_DESCFUNCTION, HiveOperation.DESCFUNCTION); + commandType.put(HiveParser.TOK_MSCK, HiveOperation.MSCK); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, + HiveOperation.ALTERTABLE_ADDCOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, + HiveOperation.ALTERTABLE_REPLACECOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, + HiveOperation.ALTERTABLE_RENAMECOL); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, + HiveOperation.ALTERTABLE_RENAME); + commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, + HiveOperation.ALTERTABLE_DROPPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, + HiveOperation.ALTERTABLE_ADDPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, + HiveOperation.ALTERTABLE_TOUCH); + commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, + HiveOperation.ALTERTABLE_ARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, + HiveOperation.ALTERTABLE_UNARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, + HiveOperation.ALTERTABLE_PROPERTIES); + commandType.put(HiveParser.TOK_DROPTABLE_PROPERTIES, + HiveOperation.ALTERTABLE_PROPERTIES); + commandType.put(HiveParser.TOK_SHOWDATABASES, + HiveOperation.SHOWDATABASES); + commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES); + commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS); + commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, + HiveOperation.SHOW_TABLESTATUS); + commandType.put(HiveParser.TOK_SHOW_TBLPROPERTIES, + HiveOperation.SHOW_TBLPROPERTIES); + commandType.put(HiveParser.TOK_SHOW_CREATETABLE, + HiveOperation.SHOW_CREATETABLE); + commandType.put(HiveParser.TOK_SHOWFUNCTIONS, + HiveOperation.SHOWFUNCTIONS); + commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES); + 
commandType.put(HiveParser.TOK_SHOWPARTITIONS, + HiveOperation.SHOWPARTITIONS); + commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS); + commandType.put(HiveParser.TOK_SHOWDBLOCKS, HiveOperation.SHOWLOCKS); + commandType.put(HiveParser.TOK_SHOWCONF, HiveOperation.SHOWCONF); + commandType.put(HiveParser.TOK_CREATEFUNCTION, + HiveOperation.CREATEFUNCTION); + commandType + .put(HiveParser.TOK_DROPFUNCTION, HiveOperation.DROPFUNCTION); + commandType.put(HiveParser.TOK_CREATEMACRO, HiveOperation.CREATEMACRO); + commandType.put(HiveParser.TOK_DROPMACRO, HiveOperation.DROPMACRO); + commandType.put(HiveParser.TOK_CREATEVIEW, HiveOperation.CREATEVIEW); + commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW); + commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX); + commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX); + commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, + HiveOperation.ALTERINDEX_REBUILD); + commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, + HiveOperation.ALTERINDEX_PROPS); + commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, + HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_DROPVIEW_PROPERTIES, + HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, + HiveOperation.ALTERTABLE_ADDPARTS); + commandType.put(HiveParser.TOK_ALTERVIEW_DROPPARTS, + HiveOperation.ALTERTABLE_DROPPARTS); + commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); + commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); + commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); + commandType.put(HiveParser.TOK_LOCKDB, HiveOperation.LOCKDB); + commandType.put(HiveParser.TOK_UNLOCKDB, HiveOperation.UNLOCKDB); + commandType.put(HiveParser.TOK_CREATEROLE, HiveOperation.CREATEROLE); + commandType.put(HiveParser.TOK_DROPROLE, HiveOperation.DROPROLE); + commandType.put(HiveParser.TOK_GRANT, HiveOperation.GRANT_PRIVILEGE); + commandType.put(HiveParser.TOK_REVOKE, HiveOperation.REVOKE_PRIVILEGE); + commandType.put(HiveParser.TOK_SHOW_GRANT, HiveOperation.SHOW_GRANT); + commandType.put(HiveParser.TOK_GRANT_ROLE, HiveOperation.GRANT_ROLE); + commandType.put(HiveParser.TOK_REVOKE_ROLE, HiveOperation.REVOKE_ROLE); + commandType.put(HiveParser.TOK_SHOW_ROLES, HiveOperation.SHOW_ROLES); + commandType.put(HiveParser.TOK_SHOW_SET_ROLE, HiveOperation.SHOW_ROLES); + commandType.put(HiveParser.TOK_SHOW_ROLE_PRINCIPALS, + HiveOperation.SHOW_ROLE_PRINCIPALS); + commandType.put(HiveParser.TOK_SHOW_ROLE_GRANT, + HiveOperation.SHOW_ROLE_GRANT); + commandType.put(HiveParser.TOK_ALTERDATABASE_PROPERTIES, + HiveOperation.ALTERDATABASE); + commandType.put(HiveParser.TOK_ALTERDATABASE_OWNER, + HiveOperation.ALTERDATABASE_OWNER); + commandType + .put(HiveParser.TOK_DESCDATABASE, HiveOperation.DESCDATABASE); + commandType.put(HiveParser.TOK_ALTERTABLE_SKEWED, + HiveOperation.ALTERTABLE_SKEWED); + commandType.put(HiveParser.TOK_ANALYZE, HiveOperation.ANALYZE_TABLE); + commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, + HiveOperation.ALTERVIEW_RENAME); + commandType.put(HiveParser.TOK_ALTERTABLE_PARTCOLTYPE, + HiveOperation.ALTERTABLE_PARTCOLTYPE); + commandType.put(HiveParser.TOK_SHOW_COMPACTIONS, + HiveOperation.SHOW_COMPACTIONS); + commandType.put(HiveParser.TOK_SHOW_TRANSACTIONS, + HiveOperation.SHOW_TRANSACTIONS); + } - static { - tablePartitionCommandType.put( - HiveParser.TOK_ALTERTABLE_PROTECTMODE, - new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, - 
HiveOperation.ALTERPARTITION_PROTECTMODE }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, - new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT, - HiveOperation.ALTERPARTITION_FILEFORMAT }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, - new HiveOperation[] { HiveOperation.ALTERTABLE_LOCATION, - HiveOperation.ALTERPARTITION_LOCATION }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_MERGEFILES, - new HiveOperation[] {HiveOperation.ALTERTABLE_MERGEFILES, - HiveOperation.ALTERPARTITION_MERGEFILES }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, - new HiveOperation[] {HiveOperation.ALTERTABLE_SERIALIZER, - HiveOperation.ALTERPARTITION_SERIALIZER }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, - new HiveOperation[] {HiveOperation.ALTERTABLE_SERDEPROPERTIES, - HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, - new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART}); - tablePartitionCommandType.put(HiveParser.TOK_COMPACT, - new HiveOperation[] {HiveOperation.ALTERTABLE_COMPACT, HiveOperation.ALTERTABLE_COMPACT}); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION, - new HiveOperation[] {HiveOperation.ALTERTBLPART_SKEWED_LOCATION, - HiveOperation.ALTERTBLPART_SKEWED_LOCATION }); - tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS, - new HiveOperation[] {HiveOperation.ALTERTABLE_BUCKETNUM, - HiveOperation.ALTERPARTITION_BUCKETNUM}); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, - new HiveOperation[] {HiveOperation.ALTERTABLE_CLUSTER_SORT, - HiveOperation.ALTERTABLE_CLUSTER_SORT}); - } + static { + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_PROTECTMODE, + new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, + HiveOperation.ALTERPARTITION_PROTECTMODE }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, + new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT, + HiveOperation.ALTERPARTITION_FILEFORMAT }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, + new HiveOperation[] { HiveOperation.ALTERTABLE_LOCATION, + HiveOperation.ALTERPARTITION_LOCATION }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_MERGEFILES, + new HiveOperation[] { HiveOperation.ALTERTABLE_MERGEFILES, + HiveOperation.ALTERPARTITION_MERGEFILES }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, + new HiveOperation[] { HiveOperation.ALTERTABLE_SERIALIZER, + HiveOperation.ALTERPARTITION_SERIALIZER }); + tablePartitionCommandType.put( + HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, new HiveOperation[] { + HiveOperation.ALTERTABLE_SERDEPROPERTIES, + HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); + tablePartitionCommandType + .put(HiveParser.TOK_ALTERTABLE_RENAMEPART, new HiveOperation[] { + null, HiveOperation.ALTERTABLE_RENAMEPART }); + tablePartitionCommandType.put(HiveParser.TOK_COMPACT, + new HiveOperation[] { HiveOperation.ALTERTABLE_COMPACT, + HiveOperation.ALTERTABLE_COMPACT }); + tablePartitionCommandType.put( + HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION, + new HiveOperation[] { + HiveOperation.ALTERTBLPART_SKEWED_LOCATION, + HiveOperation.ALTERTBLPART_SKEWED_LOCATION }); + tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS, + new HiveOperation[] { HiveOperation.ALTERTABLE_BUCKETNUM, + HiveOperation.ALTERPARTITION_BUCKETNUM }); + 
tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, + new HiveOperation[] { HiveOperation.ALTERTABLE_CLUSTER_SORT, + HiveOperation.ALTERTABLE_CLUSTER_SORT }); + } - public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) - throws SemanticException { - if (tree.getToken() == null) { - throw new RuntimeException("Empty Syntax Tree"); - } else { - setSessionCommandType(commandType.get(tree.getToken().getType())); + public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) + throws SemanticException { + if (tree.getToken() == null) { + throw new RuntimeException("Empty Syntax Tree"); + } else { + setSessionCommandType(commandType.get(tree.getToken().getType())); - switch (tree.getToken().getType()) { - case HiveParser.TOK_EXPLAIN: - return new ExplainSemanticAnalyzer(conf); - case HiveParser.TOK_EXPLAIN_SQ_REWRITE: - return new ExplainSQRewriteSemanticAnalyzer(conf); - case HiveParser.TOK_LOAD: - return new LoadSemanticAnalyzer(conf); - case HiveParser.TOK_EXPORT: - return new ExportSemanticAnalyzer(conf); - case HiveParser.TOK_IMPORT: - return new ImportSemanticAnalyzer(conf); - case HiveParser.TOK_CREATEDATABASE: - case HiveParser.TOK_DROPDATABASE: - case HiveParser.TOK_SWITCHDATABASE: - case HiveParser.TOK_DROPTABLE: - case HiveParser.TOK_DROPVIEW: - case HiveParser.TOK_DESCDATABASE: - case HiveParser.TOK_DESCTABLE: - case HiveParser.TOK_DESCFUNCTION: - case HiveParser.TOK_MSCK: - case HiveParser.TOK_ALTERTABLE_ADDCOLS: - case HiveParser.TOK_ALTERTABLE_RENAMECOL: - case HiveParser.TOK_ALTERTABLE_REPLACECOLS: - case HiveParser.TOK_ALTERTABLE_RENAME: - case HiveParser.TOK_ALTERTABLE_DROPPARTS: - case HiveParser.TOK_ALTERTABLE_ADDPARTS: - case HiveParser.TOK_ALTERTABLE_PROPERTIES: - case HiveParser.TOK_DROPTABLE_PROPERTIES: - case HiveParser.TOK_ALTERTABLE_SERIALIZER: - case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: - case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: - case HiveParser.TOK_ALTERINDEX_REBUILD: - case HiveParser.TOK_ALTERINDEX_PROPERTIES: - case HiveParser.TOK_ALTERVIEW_PROPERTIES: - case HiveParser.TOK_DROPVIEW_PROPERTIES: - case HiveParser.TOK_ALTERVIEW_ADDPARTS: - case HiveParser.TOK_ALTERVIEW_DROPPARTS: - case HiveParser.TOK_ALTERVIEW_RENAME: - case HiveParser.TOK_SHOWDATABASES: - case HiveParser.TOK_SHOWTABLES: - case HiveParser.TOK_SHOWCOLUMNS: - case HiveParser.TOK_SHOW_TABLESTATUS: - case HiveParser.TOK_SHOW_TBLPROPERTIES: - case HiveParser.TOK_SHOW_CREATETABLE: - case HiveParser.TOK_SHOWFUNCTIONS: - case HiveParser.TOK_SHOWPARTITIONS: - case HiveParser.TOK_SHOWINDEXES: - case HiveParser.TOK_SHOWLOCKS: - case HiveParser.TOK_SHOWDBLOCKS: - case HiveParser.TOK_SHOW_COMPACTIONS: - case HiveParser.TOK_SHOW_TRANSACTIONS: - case HiveParser.TOK_SHOWCONF: - case HiveParser.TOK_CREATEINDEX: - case HiveParser.TOK_DROPINDEX: - case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: - case HiveParser.TOK_ALTERTABLE_TOUCH: - case HiveParser.TOK_ALTERTABLE_ARCHIVE: - case HiveParser.TOK_ALTERTABLE_UNARCHIVE: - case HiveParser.TOK_LOCKTABLE: - case HiveParser.TOK_UNLOCKTABLE: - case HiveParser.TOK_LOCKDB: - case HiveParser.TOK_UNLOCKDB: - case HiveParser.TOK_CREATEROLE: - case HiveParser.TOK_DROPROLE: - case HiveParser.TOK_GRANT: - case HiveParser.TOK_REVOKE: - case HiveParser.TOK_SHOW_GRANT: - case HiveParser.TOK_GRANT_ROLE: - case HiveParser.TOK_REVOKE_ROLE: - case HiveParser.TOK_SHOW_ROLE_GRANT: - case HiveParser.TOK_SHOW_ROLE_PRINCIPALS: - case HiveParser.TOK_SHOW_ROLES: - case HiveParser.TOK_ALTERDATABASE_PROPERTIES: - case 
HiveParser.TOK_ALTERDATABASE_OWNER: - case HiveParser.TOK_ALTERTABLE_SKEWED: - case HiveParser.TOK_TRUNCATETABLE: - case HiveParser.TOK_EXCHANGEPARTITION: - case HiveParser.TOK_SHOW_SET_ROLE: + switch (tree.getToken().getType()) { + case HiveParser.TOK_EXPLAIN: + return new ExplainSemanticAnalyzer(conf); + case HiveParser.TOK_EXPLAIN_SQ_REWRITE: + return new ExplainSQRewriteSemanticAnalyzer(conf); + case HiveParser.TOK_LOAD: + return new LoadSemanticAnalyzer(conf); + case HiveParser.TOK_EXPORT: + return new ExportSemanticAnalyzer(conf); + case HiveParser.TOK_IMPORT: + return new ImportSemanticAnalyzer(conf); + case HiveParser.TOK_CREATEDATABASE: + case HiveParser.TOK_DROPDATABASE: + case HiveParser.TOK_SWITCHDATABASE: + case HiveParser.TOK_DROPTABLE: + case HiveParser.TOK_DROPVIEW: + case HiveParser.TOK_DESCDATABASE: + case HiveParser.TOK_DESCTABLE: + case HiveParser.TOK_DESCFUNCTION: + case HiveParser.TOK_MSCK: + case HiveParser.TOK_ALTERTABLE_ADDCOLS: + case HiveParser.TOK_ALTERTABLE_RENAMECOL: + case HiveParser.TOK_ALTERTABLE_REPLACECOLS: + case HiveParser.TOK_ALTERTABLE_RENAME: + case HiveParser.TOK_ALTERTABLE_DROPPARTS: + case HiveParser.TOK_ALTERTABLE_ADDPARTS: + case HiveParser.TOK_ALTERTABLE_PROPERTIES: + case HiveParser.TOK_DROPTABLE_PROPERTIES: + case HiveParser.TOK_ALTERTABLE_SERIALIZER: + case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: + case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: + case HiveParser.TOK_ALTERINDEX_REBUILD: + case HiveParser.TOK_ALTERINDEX_PROPERTIES: + case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_DROPVIEW_PROPERTIES: + case HiveParser.TOK_ALTERVIEW_ADDPARTS: + case HiveParser.TOK_ALTERVIEW_DROPPARTS: + case HiveParser.TOK_ALTERVIEW_RENAME: + case HiveParser.TOK_SHOWDATABASES: + case HiveParser.TOK_SHOWTABLES: + case HiveParser.TOK_SHOWCOLUMNS: + case HiveParser.TOK_SHOW_TABLESTATUS: + case HiveParser.TOK_SHOW_TBLPROPERTIES: + case HiveParser.TOK_SHOW_CREATETABLE: + case HiveParser.TOK_SHOWFUNCTIONS: + case HiveParser.TOK_SHOWPARTITIONS: + case HiveParser.TOK_SHOWINDEXES: + case HiveParser.TOK_SHOWLOCKS: + case HiveParser.TOK_SHOWDBLOCKS: + case HiveParser.TOK_SHOW_COMPACTIONS: + case HiveParser.TOK_SHOW_TRANSACTIONS: + case HiveParser.TOK_SHOWCONF: + case HiveParser.TOK_CREATEINDEX: + case HiveParser.TOK_DROPINDEX: + case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: + case HiveParser.TOK_ALTERTABLE_TOUCH: + case HiveParser.TOK_ALTERTABLE_ARCHIVE: + case HiveParser.TOK_ALTERTABLE_UNARCHIVE: + case HiveParser.TOK_LOCKTABLE: + case HiveParser.TOK_UNLOCKTABLE: + case HiveParser.TOK_LOCKDB: + case HiveParser.TOK_UNLOCKDB: + case HiveParser.TOK_CREATEROLE: + case HiveParser.TOK_DROPROLE: + case HiveParser.TOK_GRANT: + case HiveParser.TOK_REVOKE: + case HiveParser.TOK_SHOW_GRANT: + case HiveParser.TOK_GRANT_ROLE: + case HiveParser.TOK_REVOKE_ROLE: + case HiveParser.TOK_SHOW_ROLE_GRANT: + case HiveParser.TOK_SHOW_ROLE_PRINCIPALS: + case HiveParser.TOK_SHOW_ROLES: + case HiveParser.TOK_ALTERDATABASE_PROPERTIES: + case HiveParser.TOK_ALTERDATABASE_OWNER: + case HiveParser.TOK_ALTERTABLE_SKEWED: + case HiveParser.TOK_TRUNCATETABLE: + case HiveParser.TOK_EXCHANGEPARTITION: + case HiveParser.TOK_SHOW_SET_ROLE: + case HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS: + return new DDLSemanticAnalyzer(conf); + case HiveParser.TOK_ALTERTABLE_PARTITION: + if (tree.getChildCount() == 2 + && ((ASTNode) tree.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS) + return new DDLSemanticAnalyzer(conf); + HiveOperation commandType = null; + Integer type = 
((ASTNode) tree.getChild(1)).getToken() + .getType(); + if (tree.getChild(0).getChildCount() > 1) { + commandType = tablePartitionCommandType.get(type)[1]; + } else { + commandType = tablePartitionCommandType.get(type)[0]; + } + setSessionCommandType(commandType); + return new DDLSemanticAnalyzer(conf); - return new DDLSemanticAnalyzer(conf); - case HiveParser.TOK_ALTERTABLE_PARTITION: - HiveOperation commandType = null; - Integer type = ((ASTNode) tree.getChild(1)).getToken().getType(); - if (tree.getChild(0).getChildCount() > 1) { - commandType = tablePartitionCommandType.get(type)[1]; - } else { - commandType = tablePartitionCommandType.get(type)[0]; - } - setSessionCommandType(commandType); - return new DDLSemanticAnalyzer(conf); + case HiveParser.TOK_CREATEFUNCTION: + case HiveParser.TOK_DROPFUNCTION: + return new FunctionSemanticAnalyzer(conf); - case HiveParser.TOK_CREATEFUNCTION: - case HiveParser.TOK_DROPFUNCTION: - return new FunctionSemanticAnalyzer(conf); + case HiveParser.TOK_ANALYZE: + return new ColumnStatsSemanticAnalyzer(conf, tree); - case HiveParser.TOK_ANALYZE: - return new ColumnStatsSemanticAnalyzer(conf, tree); + case HiveParser.TOK_CREATEMACRO: + case HiveParser.TOK_DROPMACRO: + return new MacroSemanticAnalyzer(conf); + default: + return new SemanticAnalyzer(conf); + } + } + } - case HiveParser.TOK_CREATEMACRO: - case HiveParser.TOK_DROPMACRO: - return new MacroSemanticAnalyzer(conf); - default: - return new SemanticAnalyzer(conf); - } - } - } + private static void setSessionCommandType(HiveOperation commandType) { + if (SessionState.get() != null) { + SessionState.get().setCommandType(commandType); + } + } - private static void setSessionCommandType(HiveOperation commandType) { - if (SessionState.get() != null) { - SessionState.get().setCommandType(commandType); - } - } - - private SemanticAnalyzerFactory() { - // prevent instantiation - } + private SemanticAnalyzerFactory() { + // prevent instantiation + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java index 3cae727..6cf57db 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.plan; import java.io.Serializable; +import java.util.HashMap; import org.apache.hadoop.hive.ql.exec.ListSinkOperator; @@ -28,58 +29,57 @@ */ @Explain(displayName = "Column Stats Work") public class ColumnStatsWork implements Serializable { - private static final long serialVersionUID = 1L; - private FetchWork fWork; - private ColumnStatsDesc colStats; - private static final int LIMIT = -1; - - - public ColumnStatsWork() { - } - - public ColumnStatsWork(FetchWork work, ColumnStatsDesc colStats) { - this.fWork = work; - this.setColStats(colStats); - } - - @Override - public String toString() { - String ret; - ret = fWork.toString(); - return ret; - } - - public FetchWork getfWork() { - return fWork; - } - - public void setfWork(FetchWork fWork) { - this.fWork = fWork; - } - - @Explain(displayName = "Column Stats Desc") - public ColumnStatsDesc getColStats() { - return colStats; - } - - public void setColStats(ColumnStatsDesc colStats) { - this.colStats = colStats; - } - - public ListSinkOperator getSink() { - return fWork.getSink(); - } - - public void initializeForFetch() { - fWork.initializeForFetch(); - } - - public int getLeastNumRows() { - return fWork.getLeastNumRows(); - } - - public static int getLimit() 
{ - return LIMIT; - } + private static final long serialVersionUID = 1L; + private FetchWork fWork; + private ColumnStatsDesc colStats; + private static final int LIMIT = -1; + + public ColumnStatsWork() { + } + + public ColumnStatsWork(FetchWork work, ColumnStatsDesc colStats) { + this.fWork = work; + this.setColStats(colStats); + } + + @Override + public String toString() { + String ret; + ret = fWork.toString(); + return ret; + } + + public FetchWork getfWork() { + return fWork; + } + + public void setfWork(FetchWork fWork) { + this.fWork = fWork; + } + + @Explain(displayName = "Column Stats Desc") + public ColumnStatsDesc getColStats() { + return colStats; + } + + public void setColStats(ColumnStatsDesc colStats) { + this.colStats = colStats; + } + + public ListSinkOperator getSink() { + return fWork.getSink(); + } + + public void initializeForFetch() { + fWork.initializeForFetch(); + } + + public int getLeastNumRows() { + return fWork.getLeastNumRows(); + } + + public static int getLimit() { + return LIMIT; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 79d9d16..99e863c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -864,6 +864,7 @@ public void setShowIndexesDesc(ShowIndexesDesc showIndexesDesc) { */ @Explain(displayName = "Describe Table Operator") public DescTableDesc getDescTblDesc() { + return descTblDesc; }
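
The grammar additions above (alterStatementSuffixUpdateStatsCol and alterStatementSuffixStatsPart, both rewriting to TOK_ALTERTABLE_UPDATECOLSTATS) can be exercised directly through the parser. The sketch below is illustrative only and is not part of the patch: it assumes ParseDriver's single-argument parse(String) overload and ASTNode.dump(), and the statistics keys in the SET clause ('numDVs', 'numNulls') are placeholder assumptions, since the keys actually accepted are decided later in DDLSemanticAnalyzer, which this diff does not show.

    // Illustrative sketch only; assumes the patched HiveParser grammar is on the classpath.
    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;
    import org.apache.hadoop.hive.ql.parse.ParseException;

    public class UpdateColStatsParseSketch {
      public static void main(String[] args) throws ParseException {
        ParseDriver pd = new ParseDriver();

        // Table-level form, matched by alterStatementSuffixUpdateStatsCol.
        ASTNode tableLevel = pd.parse(
            "ALTER TABLE t UPDATE STATISTICS FOR COLUMN c SET ('numDVs'='100','numNulls'='5')");

        // Partition-level form, matched by alterStatementSuffixStatsPart.
        ASTNode partLevel = pd.parse(
            "ALTER TABLE t PARTITION (ds='2014-01-01') UPDATE STATISTICS FOR COLUMN c "
                + "SET ('numDVs'='100','numNulls'='5')");

        // Dump the trees; both should contain a TOK_ALTERTABLE_UPDATECOLSTATS node,
        // the partition form nested under TOK_ALTERTABLE_PARTITION.
        System.out.println(tableLevel.dump());
        System.out.println(partLevel.dump());
      }
    }

Both statements should yield an AST carrying TOK_ALTERTABLE_UPDATECOLSTATS, which is the token SemanticAnalyzerFactory keys on to route the command to DDLSemanticAnalyzer.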
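
validateSkewedLocationString above rejects any skewed-location value that is not an absolute URI with a scheme. The standalone sketch below restates that check using only java.net.URI; the helper name and the sample locations are assumptions made for illustration, not part of the patch.

    import java.net.URI;
    import java.net.URISyntaxException;

    public class SkewedLocationCheckSketch {
      // Mirrors the acceptance condition in validateSkewedLocationString:
      // the location must parse as a URI, be absolute, and carry a non-empty scheme.
      static boolean isValidSkewedLocation(String location) {
        try {
          URI uri = new URI(location);
          return uri.isAbsolute() && uri.getScheme() != null
              && !uri.getScheme().trim().isEmpty();
        } catch (URISyntaxException e) {
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(isValidSkewedLocation("hdfs://nn:8020/warehouse/t/k=1")); // true
        System.out.println(isValidSkewedLocation("/warehouse/t/k=1"));               // false: no scheme
        System.out.println(isValidSkewedLocation("relative/path"));                  // false: not absolute
      }
    }

Locations that fail this check cause the analyzer to raise a SemanticException with the "is not absolute or has no scheme information" message shown in the patch.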
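
One ordering detail in the SemanticAnalyzerFactory change is worth calling out: for TOK_ALTERTABLE_PARTITION, the new TOK_ALTERTABLE_UPDATECOLSTATS check runs before the tablePartitionCommandType lookup, because that map has no entry for the new token and dereferencing the resulting null HiveOperation[] would fail for "ALTER TABLE ... PARTITION ... UPDATE STATISTICS". The reduced sketch below only illustrates that ordering; the class and method names are invented for illustration and it assumes the patched HiveParser constants.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
    import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
    import org.apache.hadoop.hive.ql.parse.HiveParser;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    class AlterTablePartitionDispatchSketch {
      static BaseSemanticAnalyzer dispatch(HiveConf conf, ASTNode tree) throws SemanticException {
        int childType = ((ASTNode) tree.getChild(1)).getToken().getType();
        if (tree.getChildCount() == 2
            && childType == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS) {
          // Partition-level UPDATE STATISTICS: no table/partition HiveOperation pair
          // exists for this token, so return the DDL analyzer before the map lookup.
          return new DDLSemanticAnalyzer(conf);
        }
        // Every other suffix goes through the factory's tablePartitionCommandType
        // lookup (omitted here) to set the session command type first.
        return new DDLSemanticAnalyzer(conf);
      }
    }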