- * required int64 offset = 1;
- */
- boolean hasOffset();
- /**
- * required int64 offset = 1;
- */
- long getOffset();
-
- // required int64 length = 2;
- /**
- * required int64 length = 2;
- */
- boolean hasLength();
- /**
- * required int64 length = 2;
- */
- long getLength();
-
- // required int32 index = 3;
- /**
- * required int32 index = 3;
- */
- boolean hasIndex();
- /**
- * required int32 index = 3;
- */
- int getIndex();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
- */
- public static final class SplitInfo extends
- com.google.protobuf.GeneratedMessage
- implements SplitInfoOrBuilder {
- // Use SplitInfo.newBuilder() to construct.
- private SplitInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private SplitInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final SplitInfo defaultInstance;
- public static SplitInfo getDefaultInstance() {
- return defaultInstance;
- }
-
- public SplitInfo getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private SplitInfo(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- offset_ = input.readInt64();
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- length_ = input.readInt64();
- break;
- }
- case 24: {
- bitField0_ |= 0x00000004;
- index_ = input.readInt32();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder.class);
- }
-
- public static com.google.protobuf.Parser<SplitInfo> PARSER =
- new com.google.protobuf.AbstractParser<SplitInfo>() {
- public SplitInfo parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new SplitInfo(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<SplitInfo> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required int64 offset = 1;
- public static final int OFFSET_FIELD_NUMBER = 1;
- private long offset_;
- /**
- * required int64 offset = 1;
- */
- public boolean hasOffset() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * required int64 offset = 1;
- */
- public long getOffset() {
- return offset_;
- }
-
- // required int64 length = 2;
- public static final int LENGTH_FIELD_NUMBER = 2;
- private long length_;
- /**
- * required int64 length = 2;
- */
- public boolean hasLength() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * required int64 length = 2;
- */
- public long getLength() {
- return length_;
- }
-
- // required int32 index = 3;
- public static final int INDEX_FIELD_NUMBER = 3;
- private int index_;
- /**
- * required int32 index = 3;
- */
- public boolean hasIndex() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * required int32 index = 3;
- */
- public int getIndex() {
- return index_;
- }
-
- private void initFields() {
- offset_ = 0L;
- length_ = 0L;
- index_ = 0;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasOffset()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasLength()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasIndex()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeInt64(1, offset_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeInt64(2, length_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt32(3, index_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(1, offset_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(2, length_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(3, index_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder {
- private int bitField0_;
-
- // required int64 offset = 1;
- private long offset_ ;
- /**
- * required int64 offset = 1;
- */
- public boolean hasOffset() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * required int64 offset = 1;
- */
- public long getOffset() {
- return offset_;
- }
- /**
- * required int64 offset = 1;
- */
- public Builder setOffset(long value) {
- bitField0_ |= 0x00000001;
- offset_ = value;
- onChanged();
- return this;
- }
- /**
- * required int64 offset = 1;
- */
- public Builder clearOffset() {
- bitField0_ = (bitField0_ & ~0x00000001);
- offset_ = 0L;
- onChanged();
- return this;
- }
-
- // required int64 length = 2;
- private long length_ ;
- /**
- * required int64 length = 2;
- */
- public boolean hasLength() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * required int64 length = 2;
- */
- public long getLength() {
- return length_;
- }
- /**
- * required int64 length = 2;
- */
- public Builder setLength(long value) {
- bitField0_ |= 0x00000002;
- length_ = value;
- onChanged();
- return this;
- }
- /**
- * required int64 length = 2;
- */
- public Builder clearLength() {
- bitField0_ = (bitField0_ & ~0x00000002);
- length_ = 0L;
- onChanged();
- return this;
- }
-
- // required int32 index = 3;
- private int index_ ;
- /**
- * required int32 index = 3;
- */
- public boolean hasIndex() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * required int32 index = 3;
- */
- public int getIndex() {
- return index_;
- }
- /**
- * required int32 index = 3;
- */
- public Builder setIndex(int value) {
- bitField0_ |= 0x00000004;
- index_ = value;
- onChanged();
- return this;
- }
- /**
- * required int32 index = 3;
- */
- public Builder clearIndex() {
- bitField0_ = (bitField0_ & ~0x00000004);
- index_ = 0;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfo)
- }
-
- static {
- defaultInstance = new SplitInfo(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfo)
- }
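
For reviewers unfamiliar with the protobuf 2.5 builder API, a minimal usage
sketch of the generated messages (method names are taken from the generated
code above; the wrapper class SplitInfoExample is a hypothetical name, not
part of this patch):

import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;

public class SplitInfoExample {
  public static void main(String[] args) throws Exception {
    // All three SplitInfo fields are required; build() throws an
    // UninitializedMessageException if any setter is skipped.
    SplitInfo info = SplitInfo.newBuilder()
        .setOffset(1024L)
        .setLength(4096L)
        .setIndex(0)
        .build();

    // Wrap into the repeated container and round-trip the wire format.
    SplitInfos infos = SplitInfos.newBuilder().addInfos(info).build();
    SplitInfos parsed = SplitInfos.parseFrom(infos.toByteArray());
    assert parsed.getInfosCount() == 1;
    assert parsed.getInfos(0).getOffset() == 1024L;
  }
}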
-
- public interface SplitInfosOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>
- getInfosList();
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index);
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- int getInfosCount();
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
- getInfosOrBuilderList();
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
- */
- public static final class SplitInfos extends
- com.google.protobuf.GeneratedMessage
- implements SplitInfosOrBuilder {
- // Use SplitInfos.newBuilder() to construct.
- private SplitInfos(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private SplitInfos(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final SplitInfos defaultInstance;
- public static SplitInfos getDefaultInstance() {
- return defaultInstance;
- }
-
- public SplitInfos getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private SplitInfos(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>();
- mutable_bitField0_ |= 0x00000001;
- }
- infos_.add(input.readMessage(org.apache.hadoop.hive.metastore.Metastore.SplitInfo.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- infos_ = java.util.Collections.unmodifiableList(infos_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hive.metastore.Metastore.SplitInfos.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfos.Builder.class);
- }
-
- public static com.google.protobuf.Parser<SplitInfos> PARSER =
- new com.google.protobuf.AbstractParser<SplitInfos>() {
- public SplitInfos parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new SplitInfos(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<SplitInfos> getParserForType() {
- return PARSER;
- }
-
- // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- public static final int INFOS_FIELD_NUMBER = 1;
- private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_;
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
- return infos_;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
- getInfosOrBuilderList() {
- return infos_;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public int getInfosCount() {
- return infos_.size();
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
- return infos_.get(index);
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
- int index) {
- return infos_.get(index);
- }
-
- private void initFields() {
- infos_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- for (int i = 0; i < getInfosCount(); i++) {
- if (!getInfos(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- for (int i = 0; i < infos_.size(); i++) {
- output.writeMessage(1, infos_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- for (int i = 0; i < infos_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(1, infos_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hive.metastore.Metastore.SplitInfosOrBuilder {
- private int bitField0_;
-
- // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_ =
- java.util.Collections.emptyList();
- private void ensureInfosIsMutable() {
- if (!((bitField0_ & 0x00000001) == 0x00000001)) {
- infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>(infos_);
- bitField0_ |= 0x00000001;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> infosBuilder_;
-
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
- if (infosBuilder_ == null) {
- return java.util.Collections.unmodifiableList(infos_);
- } else {
- return infosBuilder_.getMessageList();
- }
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public int getInfosCount() {
- if (infosBuilder_ == null) {
- return infos_.size();
- } else {
- return infosBuilder_.getCount();
- }
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
- if (infosBuilder_ == null) {
- return infos_.get(index);
- } else {
- return infosBuilder_.getMessage(index);
- }
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder setInfos(
- int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
- if (infosBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureInfosIsMutable();
- infos_.set(index, value);
- onChanged();
- } else {
- infosBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder setInfos(
- int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
- if (infosBuilder_ == null) {
- ensureInfosIsMutable();
- infos_.set(index, builderForValue.build());
- onChanged();
- } else {
- infosBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder addInfos(org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
- if (infosBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureInfosIsMutable();
- infos_.add(value);
- onChanged();
- } else {
- infosBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder addInfos(
- int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
- if (infosBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureInfosIsMutable();
- infos_.add(index, value);
- onChanged();
- } else {
- infosBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder addInfos(
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
- if (infosBuilder_ == null) {
- ensureInfosIsMutable();
- infos_.add(builderForValue.build());
- onChanged();
- } else {
- infosBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder addInfos(
- int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
- if (infosBuilder_ == null) {
- ensureInfosIsMutable();
- infos_.add(index, builderForValue.build());
- onChanged();
- } else {
- infosBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder addAllInfos(
- java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfo> values) {
- if (infosBuilder_ == null) {
- ensureInfosIsMutable();
- super.addAll(values, infos_);
- onChanged();
- } else {
- infosBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder clearInfos() {
- if (infosBuilder_ == null) {
- infos_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
- onChanged();
- } else {
- infosBuilder_.clear();
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public Builder removeInfos(int index) {
- if (infosBuilder_ == null) {
- ensureInfosIsMutable();
- infos_.remove(index);
- onChanged();
- } else {
- infosBuilder_.remove(index);
- }
- return this;
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder getInfosBuilder(
- int index) {
- return getInfosFieldBuilder().getBuilder(index);
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
- int index) {
- if (infosBuilder_ == null) {
- return infos_.get(index); } else {
- return infosBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
- getInfosOrBuilderList() {
- if (infosBuilder_ != null) {
- return infosBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(infos_);
- }
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder() {
- return getInfosFieldBuilder().addBuilder(
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder(
- int index) {
- return getInfosFieldBuilder().addBuilder(
- index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
- }
- /**
- * repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
- */
- public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder>
- getInfosBuilderList() {
- return getInfosFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
- getInfosFieldBuilder() {
- if (infosBuilder_ == null) {
- infosBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>(
- infos_,
- ((bitField0_ & 0x00000001) == 0x00000001),
- getParentForChildren(),
- isClean());
- infos_ = null;
- }
- return infosBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfos)
- }
-
- static {
- defaultInstance = new SplitInfos(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfos)
- }
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+public class FileUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
+
+ /**
+ * Move a particular file or directory to the trash.
+ * @param fs FileSystem to use
+ * @param f path of file or directory to move to trash.
+ * @param conf
+ * @return true if move successful
+ * @throws IOException
+ */
+ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
+ throws IOException {
+ LOG.debug("deleting " + f);
+ boolean result = false;
+ try {
+ if(purge) {
+ LOG.debug("purge is set to true. Not moving to Trash " + f);
+ } else {
+ result = Trash.moveToAppropriateTrash(fs, f, conf);
+ if (result) {
+ LOG.trace("Moved to trash: " + f);
+ return true;
+ }
+ }
+ } catch (IOException ioe) {
+ // for whatever failure reason including that trash has lower encryption zone
+ // retry with force delete
+ LOG.warn(ioe.getMessage() + "; Force to delete it.");
+ }
+
+ result = fs.delete(f, true);
+ if (!result) {
+ LOG.error("Failed to delete " + f);
+ }
+ return result;
+ }
+}
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
new file mode 100644
index 0000000000..81f8a8518d
--- /dev/null
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+public class JavaUtils {
+ /**
+ * Standard way of getting classloader in Hive code (outside of Hadoop).
+ *
+ * Uses the context loader to get access to classpaths to auxiliary and jars
+ * added with 'add jar' command. Falls back to current classloader.
+ *
+ * In Hadoop-related code, we use Configuration.getClassLoader().
+ * @return the class loader
+ */
+ public static ClassLoader getClassLoader() {
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+ if (classLoader == null) {
+ classLoader = JavaUtils.class.getClassLoader();
+ }
+ return classLoader;
+ }
+}
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
new file mode 100644
index 0000000000..3ef7e514fd
--- /dev/null
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MetaStoreUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class);
+
+ /**
+ * Catches exceptions that can't be handled and bundles them to MetaException
+ *
+ * @param e exception to wrap.
+ * @throws MetaException wrapper for the exception
+ */
+ public static void logAndThrowMetaException(Exception e) throws MetaException {
+ String exInfo = "Got exception: " + e.getClass().getName() + " "
+ + e.getMessage();
+ LOG.error(exInfo, e);
+ LOG.error("Converting exception to MetaException");
+ throw new MetaException(exInfo);
+ }
+
+}
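
A minimal sketch of how these two new utilities are likely meant to compose
in callers: delete a path, preferring the trash, and surface any I/O failure
as the Thrift-visible MetaException. DropPartitionSketch and dropPartitionDir
are hypothetical names, not part of this patch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class DropPartitionSketch {
  public static void dropPartitionDir(Path dir, Configuration conf, boolean purge)
      throws MetaException {
    try {
      FileSystem fs = dir.getFileSystem(conf);
      // purge=true skips the trash and force-deletes immediately.
      FileUtils.moveToTrash(fs, dir, conf, purge);
    } catch (IOException e) {
      // logAndThrowMetaException always throws, logging the original first.
      MetaStoreUtils.logAndThrowMetaException(e);
    }
  }
}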
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java
new file mode 100644
index 0000000000..de54ff3d38
--- /dev/null
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetastoreVersionInfo.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreVersionAnnotation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class that uses package information to figure out which version of the metastore this program is.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MetastoreVersionInfo {
+ private static final Logger LOG = LoggerFactory.getLogger(MetastoreVersionInfo.class);
+
+ private static Package myPackage;
+ private static MetastoreVersionAnnotation version;
+
+ static {
+ myPackage = MetastoreVersionAnnotation.class.getPackage();
+ version = myPackage.getAnnotation(MetastoreVersionAnnotation.class);
+ }
+
+ /**
+ * Get the meta-data for the Hive package.
+ * @return
+ */
+ static Package getPackage() {
+ return myPackage;
+ }
+
+ /**
+ * Get the Hive version.
+ * @return the Hive version string, eg. "0.6.3-dev"
+ */
+ public static String getVersion() {
+ return version != null ? version.version() : "Unknown";
+ }
+
+ /**
+ * Get the Hive short version, with major/minor/change version numbers.
+ * @return short version string, eg. "0.6.3"
+ */
+ public static String getShortVersion() {
+ return version != null ? version.shortVersion() : "Unknown";
+ }
+
+ /**
+ * Get the git revision number for the root directory
+ * @return the revision number, eg. "451451"
+ */
+ public static String getRevision() {
+ return version != null ? version.revision() : "Unknown";
+ }
+
+ /**
+ * Get the branch on which this originated.
+ * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+ */
+ public static String getBranch() {
+ return version != null ? version.branch() : "Unknown";
+ }
+
+ /**
+ * The date that Hive was compiled.
+ * @return the compilation date in unix date format
+ */
+ public static String getDate() {
+ return version != null ? version.date() : "Unknown";
+ }
+
+ /**
+ * The user that compiled Hive.
+ * @return the username of the user
+ */
+ public static String getUser() {
+ return version != null ? version.user() : "Unknown";
+ }
+
+ /**
+ * Get the git URL for the root Hive directory.
+ */
+ public static String getUrl() {
+ return version != null ? version.url() : "Unknown";
+ }
+
+ /**
+ * Get the checksum of the source files from which Hive was
+ * built.
+ **/
+ public static String getSrcChecksum() {
+ return version != null ? version.srcChecksum() : "Unknown";
+ }
+
+ /**
+ * Returns the buildVersion which includes version,
+ * revision, user and date.
+ */
+ public static String getBuildVersion(){
+ return MetastoreVersionInfo.getVersion() +
+ " from " + MetastoreVersionInfo.getRevision() +
+ " by " + MetastoreVersionInfo.getUser() +
+ " source checksum " + MetastoreVersionInfo.getSrcChecksum();
+ }
+
+ public static void main(String[] args) {
+ LOG.debug("version: "+ version);
+ System.out.println("Hive " + getVersion());
+ System.out.println("Git " + getUrl() + " -r " + getRevision());
+ System.out.println("Compiled by " + getUser() + " on " + getDate());
+ System.out.println("From source with checksum " + getSrcChecksum());
+ }
+
+}
diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
similarity index 100%
rename from metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
rename to standalone-metastore/src/main/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
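
For reference, the two messages in the renamed (otherwise unchanged)
metastore.proto, as reconstructed from the generated Java above; any options
the real file may carry are omitted here:

package org.apache.hadoop.hive.metastore;

message SplitInfo {
  required int64 offset = 1;
  required int64 length = 2;
  required int32 index = 3;
}

message SplitInfos {
  repeated SplitInfo infos = 1;
}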
diff --git standalone-metastore/src/main/resources/saveVersion.sh standalone-metastore/src/main/resources/saveVersion.sh
new file mode 100755
index 0000000000..c9bde68835
--- /dev/null
+++ standalone-metastore/src/main/resources/saveVersion.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is used to generate the package-info.java class that
+# records the version, revision, branch, user, timestamp, and url
+unset LANG
+unset LC_CTYPE
+unset LC_TIME
+version=$1
+shortversion=$2
+src_dir=$3
+revision=$4
+branch=$5
+url=$6
+user=`whoami`
+date=`date`
+dir=`pwd`
+cwd=`dirname $dir`
+if [ "$revision" = "" ]; then
+ if git rev-parse HEAD 2>/dev/null > /dev/null ; then
+ revision=`git log -1 --pretty=format:"%H"`
+ hostname=`hostname`
+ branch=`git branch | sed -n -e 's/^* //p'`
+ url="git://${hostname}${cwd}"
+ elif [ -d .svn ]; then
+ revision=`svn info ../ | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
+ url=`svn info ../ | sed -n -e 's/^URL: \(.*\)/\1/p'`
+ # Get canonical branch (branches/X, tags/X, or trunk)
+ branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \
+ -e 's,.*\(tags/.*\)$,\1,p' \
+ -e 's,.*trunk$,trunk,p'`
+ else
+ revision="Unknown"
+ branch="Unknown"
+ url="file://$cwd"
+ fi
+fi
+if [ "$branch" = "" ]; then
+ branch="Unknown"
+fi
+if [ "$url" = "" ]; then
+ url="file://$cwd"
+fi
+
+if [ -x /sbin/md5 ]; then
+ md5="/sbin/md5"
+else
+ md5="md5sum"
+fi
+
+srcChecksum=`find ../ -name '*.java' | grep -v generated-sources | LC_ALL=C sort | xargs $md5 | $md5 | cut -d ' ' -f 1`
+
+mkdir -p $src_dir/gen/org/apache/hadoop/hive/metastore/annotation
+
+# In Windows, all the following string ends with \r, need to get rid of them
+branch=`echo $branch | tr -d '\r'`
+user=`echo $user | tr -d '\r'`
+date=`echo $date | tr -d '\r'`
+url=`echo $url | tr -d '\r'`
+srcChecksum=`echo $srcChecksum | tr -d '\r'`
+
+cat << EOF | \
+ sed -e "s/VERSION/$version/" -e "s/SHORTVERSION/$shortversion/" \
+ -e "s/USER/$user/" -e "s/DATE/$date/" \
+ -e "s|URL|$url|" -e "s/REV/$revision/" \
+ -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \
+ > $src_dir/gen/org/apache/hadoop/hive/metastore/annotation/package-info.java
+/*
+ * Generated by saveVersion.sh
+ */
+@MetastoreVersionAnnotation(version="VERSION", shortVersion="SHORTVERSION",
+ revision="REV", branch="BRANCH",
+ user="USER", date="DATE", url="URL",
+ srcChecksum="SRCCHECKSUM")
+package org.apache.hadoop.hive.metastore.annotation;
+EOF
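
The heredoc above is piped through sed, so the emitted package-info.java
looks like the following sketch (all substituted values here are hypothetical
examples):

/*
 * Generated by saveVersion.sh
 */
// All annotation values below are hypothetical build-time substitutions.
@MetastoreVersionAnnotation(version="3.0.0-SNAPSHOT", shortVersion="3.0.0",
                         revision="f3a1c8f0", branch="master",
                         user="jane", date="Tue Aug 1 12:00:00 PDT 2017",
                         url="git://build-host/home/jane/hive",
                         srcChecksum="0123456789abcdef0123456789abcdef")
package org.apache.hadoop.hive.metastore.annotation;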
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
similarity index 85%
rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
index 40700daab8..6e06026f77 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,13 +21,16 @@
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats;
import org.apache.hadoop.hive.metastore.AggregateStatsCache.Key;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hive.common.util.BloomFilter;
import org.junit.After;
import org.junit.AfterClass;
@@ -46,15 +49,15 @@
static int NUM_COLS = 5;
static int MAX_CACHE_NODES = 10;
static int MAX_PARTITIONS_PER_CACHE_NODE = 10;
- static String TIME_TO_LIVE = "20s";
- static String MAX_WRITER_WAIT = "1s";
- static String MAX_READER_WAIT = "1s";
- static float FALSE_POSITIVE_PROBABILITY = (float) 0.01;
- static float MAX_VARIANCE = (float) 0.5;
+ static long TIME_TO_LIVE = 2;
+ static long MAX_WRITER_WAIT = 1;
+ static long MAX_READER_WAIT = 1;
+ static double FALSE_POSITIVE_PROBABILITY = 0.01;
+ static double MAX_VARIANCE = 0.5;
static AggregateStatsCache cache;
- static List