diff --git a/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/src/main/java/org/apache/hadoop/hbase/ClusterId.java
new file mode 100644
index 0000000..1025b88
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.util.UUID;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * The identifier for this cluster.
+ * It is serialized to the filesystem and up into ZooKeeper. This class is a container for the id,
+ * and it knows how to serialize and deserialize the cluster id.
+ */
+@InterfaceAudience.Private
+public class ClusterId {
+ private final String id;
+
+ /**
+ * New ClusterId. Generates a unique id.
+ */
+ public ClusterId() {
+ this(UUID.randomUUID().toString());
+ }
+
+ ClusterId(final String uuid) {
+ this.id = uuid;
+ }
+
+ /**
+ * @return The clusterid serialized using pb w/ pb magic prefix
+ */
+ public byte [] toByteArray() {
+ return ProtobufUtil.prependPBMagic(convert().toByteArray());
+ }
+
+ /**
+ * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
+ * @return An instance of {@link ClusterId} made from bytes
+ * @throws DeserializationException
+ * @see {@link #toByteArray()}
+ */
+ public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException {
+ if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
+ ClusterIdProtos.ClusterId cid = null;
+ try {
+ cid = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return convert(cid);
+ } else {
+ // Presume it was written out this way, the old way.
+ return new ClusterId(Bytes.toString(bytes));
+ }
+ }
+
+ /**
+ * @return A pb instance to represent this instance.
+ */
+ ClusterIdProtos.ClusterId convert() {
+ ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
+ return builder.setClusterId(this.id).build();
+ }
+
+ /**
+ * @param cid A pb ClusterId instance
+ * @return A {@link ClusterId} made from the passed in cid
+ */
+ static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
+ return new ClusterId(cid.getClusterId());
+ }
+}
\ No newline at end of file
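A minimal usage sketch (not part of the patch; everything referenced is defined in the new file above): round-tripping a ClusterId through the pb serialization with the magic prefix.

import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.DeserializationException;

public class ClusterIdRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    ClusterId id = new ClusterId();              // backed by a random UUID
    byte[] bytes = id.toByteArray();             // pb serialization, prepended with the pb magic
    ClusterId copy = ClusterId.parseFrom(bytes); // strips the prefix and parses the pb message
    System.out.println(copy != null);            // old plain-String bytes (no prefix) also parse
  }
}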
diff --git a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 5862f15..7b0f304 100644
--- a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
@@ -41,6 +43,8 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
/**
* An HColumnDescriptor contains information about a column family such as the
@@ -94,7 +98,7 @@ public class HColumnDescriptor implements WritableComparable
*/
public static final String DEFAULT_COMPRESSION =
Compression.Algorithm.NONE.getName();
-
+
/**
* Default value of the flag that enables data block encoding on disk, as
* opposed to encoding in cache only. We encode blocks everywhere by default,
@@ -172,10 +176,8 @@ public class HColumnDescriptor implements WritableComparable
DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
- DEFAULT_VALUES.put(ENCODE_ON_DISK,
- String.valueOf(DEFAULT_ENCODE_ON_DISK));
- DEFAULT_VALUES.put(DATA_BLOCK_ENCODING,
- String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
+ DEFAULT_VALUES.put(ENCODE_ON_DISK, String.valueOf(DEFAULT_ENCODE_ON_DISK));
+ DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
for (String s : DEFAULT_VALUES.keySet()) {
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
}
@@ -195,7 +197,9 @@ public class HColumnDescriptor implements WritableComparable
/**
* Default constructor. Must be present for Writable.
+ * @deprecated Used by Writables and Writables are going away.
*/
+ @Deprecated
public HColumnDescriptor() {
this.name = null;
}
@@ -877,8 +881,10 @@ public class HColumnDescriptor implements WritableComparable
return result;
}
- // Writable
-
+ /**
+ * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
+ */
+ @Deprecated
public void readFields(DataInput in) throws IOException {
int version = in.readByte();
if (version < 6) {
@@ -945,6 +951,10 @@ public class HColumnDescriptor implements WritableComparable
}
}
+ /**
+ * @deprecated Writables are going away. Use {@link #toByteArray()} instead.
+ */
+ @Deprecated
public void write(DataOutput out) throws IOException {
out.writeByte(COLUMN_DESCRIPTOR_VERSION);
Bytes.writeByteArray(out, this.name);
@@ -970,4 +980,58 @@ public class HColumnDescriptor implements WritableComparable
}
return result;
}
-}
+
+ /**
+ * @return This instance serialized with pb with pb magic prefix
+ * @see {@link #parseFrom(byte[])}
+ */
+ public byte [] toByteArray() {
+ return ProtobufUtil.prependPBMagic(convert().toByteArray());
+ }
+
+ /**
+ * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
+ * @return An instance of {@link HColumnDescriptor} made from bytes
+ * @throws DeserializationException
+ * @see {@link #toByteArray()}
+ */
+ public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
+ if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
+ ColumnFamilySchema cfs = null;
+ try {
+ cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return convert(cfs);
+ }
+
+ /**
+ * @param cfs A pb ColumnFamilySchema instance
+ * @return An {@link HColumnDescriptor} made from the passed in cfs
+ */
+ static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
+ HColumnDescriptor hcd = new HColumnDescriptor(cfs.getName().toByteArray());
+ for (ColumnFamilySchema.Attribute a: cfs.getAttributesList()) {
+ hcd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+ }
+ return hcd;
+ }
+
+ /**
+ * @return Convert this instance to the pb column family type
+ */
+ ColumnFamilySchema convert() {
+ ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
+ builder.setName(ByteString.copyFrom(getName()));
+ for (Map.Entry e: this.values.entrySet()) {
+ ColumnFamilySchema.Attribute.Builder aBuilder = ColumnFamilySchema.Attribute.newBuilder();
+ aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
+ aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+ builder.addAttributes(aBuilder.build());
+ }
+ return builder.build();
+ }
+}
\ No newline at end of file
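A hedged sketch (not part of the patch) of the new HColumnDescriptor round trip; the byte[] family-name constructor and the max-versions accessors are assumed from the existing public API.

import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class HColumnDescriptorPbRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
    hcd.setMaxVersions(5);                                // lands in the values map
    byte[] pb = hcd.toByteArray();                        // pb magic prefix + ColumnFamilySchema
    HColumnDescriptor copy = HColumnDescriptor.parseFrom(pb);
    System.out.println(copy.getMaxVersions());            // attributes are carried by convert()
  }
}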
diff --git a/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 8d83ff3..377ab15 100644
--- a/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -19,10 +19,12 @@
*/
package org.apache.hadoop.hbase;
+import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
+import java.io.InputStream;
import java.util.Arrays;
import org.apache.commons.logging.Log;
@@ -30,21 +32,28 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
+import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
/**
* HRegion information.
- * Contains HRegion id, start and end keys, a reference to this
- * HRegions' table descriptor, etc.
+ * Contains HRegion id, start and end keys, a reference to this HRegion's table descriptor, etc.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -194,7 +203,10 @@ implements WritableComparable {
setHashCode();
}
- /** Default constructor - creates empty object */
+ /** Default constructor - creates empty object
+ * @deprecated Used by Writables and Writables are going away.
+ */
+ @Deprecated
public HRegionInfo() {
super();
}
@@ -229,8 +241,7 @@ implements WritableComparable {
* @param endKey end of key range
* @throws IllegalArgumentException
*/
- public HRegionInfo(final byte[] tableName, final byte[] startKey,
- final byte[] endKey)
+ public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey)
throws IllegalArgumentException {
this(tableName, startKey, endKey, false);
}
@@ -246,8 +257,8 @@ implements WritableComparable {
* regions that may or may not hold references to this region.
* @throws IllegalArgumentException
*/
- public HRegionInfo(final byte[] tableName, final byte[] startKey,
- final byte[] endKey, final boolean split)
+ public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey,
+ final boolean split)
throws IllegalArgumentException {
this(tableName, startKey, endKey, split, System.currentTimeMillis());
}
@@ -539,7 +550,7 @@ implements WritableComparable {
Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
return firstKeyInRange && lastKeyInRange;
}
-
+
/**
* Return true if the given row falls in this region.
*/
@@ -700,10 +711,11 @@ implements WritableComparable {
return VERSION;
}
- //
- // Writable
- //
-
+ /**
+ * @deprecated Use protobuf serialization instead. See {@link #toByteArray()} and
+ * {@link #toDelimitedByteArray()}
+ */
+ @Deprecated
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
@@ -717,6 +729,11 @@ implements WritableComparable {
out.writeInt(hashCode);
}
+ /**
+ * @deprecated Use protobuf deserialization instead. See {@link #parseFrom(byte[])} and
+ * {@link #parseFrom(FSDataInputStream)}
+ */
+ @Deprecated
@Override
public void readFields(DataInput in) throws IOException {
// Read the single version byte. We don't ask the super class do it
@@ -814,4 +831,156 @@ implements WritableComparable {
return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()?
KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
}
-}
+
+ /**
+ * Convert this HRegionInfo into a RegionInfo
+ *
+ * @return the converted RegionInfo
+ */
+ RegionInfo convert() {
+ return convert(this);
+ }
+
+ /**
+ * Convert a HRegionInfo to a RegionInfo
+ *
+ * @param info the HRegionInfo to convert
+ * @return the converted RegionInfo
+ */
+ public static RegionInfo convert(final HRegionInfo info) {
+ if (info == null) return null;
+ RegionInfo.Builder builder = RegionInfo.newBuilder();
+ builder.setTableName(ByteString.copyFrom(info.getTableName()));
+ builder.setRegionId(info.getRegionId());
+ if (info.getStartKey() != null) {
+ builder.setStartKey(ByteString.copyFrom(info.getStartKey()));
+ }
+ if (info.getEndKey() != null) {
+ builder.setEndKey(ByteString.copyFrom(info.getEndKey()));
+ }
+ builder.setOffline(info.isOffline());
+ builder.setSplit(info.isSplit());
+ return builder.build();
+ }
+
+ /**
+ * Convert a RegionInfo to a HRegionInfo
+ *
+ * @param proto the RegionInfo to convert
+ * @return the converted HRegionInfo
+ */
+ public static HRegionInfo convert(final RegionInfo proto) {
+ if (proto == null) return null;
+ byte [] tableName = proto.getTableName().toByteArray();
+ if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
+ return ROOT_REGIONINFO;
+ } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
+ return FIRST_META_REGIONINFO;
+ }
+ long regionId = proto.getRegionId();
+ byte[] startKey = null;
+ byte[] endKey = null;
+ if (proto.hasStartKey()) {
+ startKey = proto.getStartKey().toByteArray();
+ }
+ if (proto.hasEndKey()) {
+ endKey = proto.getEndKey().toByteArray();
+ }
+ boolean split = false;
+ if (proto.hasSplit()) {
+ split = proto.getSplit();
+ }
+ HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId);
+ if (proto.hasOffline()) {
+ hri.setOffline(proto.getOffline());
+ }
+ return hri;
+ }
+
+ /**
+ * @return This instance serialized as protobuf w/ a magic pb prefix.
+ * @see #parseFrom(byte[])
+ */
+ public byte [] toByteArray() {
+ byte [] bytes = convert().toByteArray();
+ return ProtobufUtil.prependPBMagic(bytes);
+ }
+
+ /**
+ * @param bytes A pb serialized {@link HRegionInfo} instance with pb magic prefix; may be null
+ * @return A deserialized {@link HRegionInfo}, or null if we failed to deserialize or the passed bytes were null
+ * @see {@link #toByteArray()}
+ */
+ public static HRegionInfo parseFromOrNull(final byte [] bytes) {
+ if (bytes == null || bytes.length <= 0) return null;
+ try {
+ return parseFrom(bytes);
+ } catch (DeserializationException e) {
+ return null;
+ }
+ }
+
+ /**
+ * @param bytes A pb RegionInfo serialized with a pb magic prefix.
+ * @return A deserialized {@link HRegionInfo}
+ * @throws DeserializationException
+ * @see {@link #toByteArray()}
+ */
+ public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationException {
+ if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ try {
+ HBaseProtos.RegionInfo ri =
+ HBaseProtos.RegionInfo.newBuilder().mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ return convert(ri);
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ } else {
+ try {
+ return (HRegionInfo)Writables.getWritable(bytes, new HRegionInfo());
+ } catch (IOException e) {
+ throw new DeserializationException(e);
+ }
+ }
+ }
+
+ /**
+ * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use
+ * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+ * @throws IOException
+ * @see {@link #toByteArray()}
+ */
+ public byte [] toDelimitedByteArray() throws IOException {
+ return ProtobufUtil.toDelimitedByteArray(convert());
+ }
+
+ /**
+ * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
+ * serialized to the stream with {@link #toDelimitedByteArray()}
+ * @param in Stream to read the HRegionInfo from
+ * @return An instance of HRegionInfo.
+ * @throws IOException
+ */
+ public static HRegionInfo parseFrom(final FSDataInputStream in) throws IOException {
+ // I need to be able to move back in the stream if this is not a pb serialization so I can
+ // do the Writable decoding instead.
+ InputStream is = in.markSupported()? in: new BufferedInputStream(in);
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ is.mark(pblen);
+ byte [] pbuf = new byte[pblen];
+ int read = is.read(pbuf);
+ if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
+ if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
+ return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(is));
+ } else {
+ // Presume Writables. Need to reset the stream since it didn't start w/ pb.
+ in.reset();
+ HRegionInfo hri = new HRegionInfo();
+ hri.readFields(in);
+ return hri;
+ }
+ }
+}
\ No newline at end of file
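A minimal sketch (not part of the patch) of the byte[] round trip added above, using the three-argument constructor kept in this file.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class HRegionInfoPbRoundTrip {
  public static void main(String[] args) {
    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("t"),
        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
    byte[] pb = hri.toByteArray();                      // pb magic prefix + RegionInfo
    // parseFromOrNull swallows DeserializationException and returns null instead of throwing.
    HRegionInfo copy = HRegionInfo.parseFromOrNull(pb);
    System.out.println(hri.equals(copy));
    // For streams, toDelimitedByteArray()/parseFrom(FSDataInputStream) probe for the magic
    // prefix and fall back to Writable decoding when it is absent.
  }
}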
diff --git a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index af89e3e..5dac1f2 100644
--- a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -27,6 +27,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@@ -37,12 +38,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.WritableComparable;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
/**
* HTableDescriptor contains the details about an HBase table such as the descriptors of
* all the column families, is the table a catalog table, -ROOT- or
@@ -73,10 +79,8 @@ public class HTableDescriptor implements WritableComparable {
protected final Map values =
new HashMap();
- private static final String FAMILIES = "FAMILIES";
-
public static final String SPLIT_POLICY = "SPLIT_POLICY";
-
+
/**
* INTERNAL Used by HBase Shell interface to access this metadata
* attribute which denotes the maximum size of the store file after which
@@ -185,9 +189,23 @@ public class HTableDescriptor implements WritableComparable {
RESERVED_KEYWORDS.add(IS_META_KEY);
}
+ /**
+ * Cache of whether this is a meta table or not.
+ */
private volatile Boolean meta = null;
+ /**
+ * Cache of whether this is a root table or not.
+ */
private volatile Boolean root = null;
- private Boolean isDeferredLog = null;
+ /**
+ * Cache of the deferred log flush setting.
+ */
+ private Boolean deferredLog = null;
+
+ /**
+ * True if this instance made by deserializing from pb.
+ */
+ private final boolean pbMade;
/**
* Maps column family name to the respective HColumnDescriptors
@@ -200,7 +218,17 @@ public class HTableDescriptor implements WritableComparable {
* catalog tables, .META. and -ROOT-.
*/
protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
+ this(name, families, false);
+ }
+
+ /**
+ * INTERNAL Protected constructor used internally to create table descriptors for the
+ * catalog tables, .META. and -ROOT-.
+ */
+ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
+ final boolean pbMade) {
this.name = name.clone();
+ this.pbMade = pbMade;
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
for(HColumnDescriptor descriptor : families) {
@@ -224,15 +252,26 @@ public class HTableDescriptor implements WritableComparable {
values.entrySet()) {
this.values.put(entry.getKey(), entry.getValue());
}
+ this.pbMade = false;
}
/**
* Default constructor which constructs an empty object.
* For deserializing an HTableDescriptor instance only.
* @see #HTableDescriptor(byte[])
+ * @deprecated Used by Writables and Writables are going away.
*/
+ @Deprecated
public HTableDescriptor() {
super();
+ this.pbMade = false;
+ }
+
+ /**
+ * @return True if made by deserializing from pb
+ */
+ public boolean isPbMade() {
+ return this.pbMade;
}
/**
@@ -260,6 +299,7 @@ public class HTableDescriptor implements WritableComparable {
setMetaFlags(this.name);
this.name = this.isMetaRegion()? name: isLegalTableName(name);
this.nameAsString = Bytes.toString(this.name);
+ this.pbMade = false;
}
/**
@@ -281,6 +321,7 @@ public class HTableDescriptor implements WritableComparable {
desc.values.entrySet()) {
this.values.put(e.getKey(), e.getValue());
}
+ this.pbMade = false;
}
/*
@@ -548,11 +589,11 @@ public class HTableDescriptor implements WritableComparable {
* @see #setDeferredLogFlush(boolean)
*/
public synchronized boolean isDeferredLogFlush() {
- if(this.isDeferredLog == null) {
- this.isDeferredLog =
+ if(this.deferredLog == null) {
+ this.deferredLog =
isSomething(DEFERRED_LOG_FLUSH_KEY, DEFAULT_DEFERRED_LOG_FLUSH);
}
- return this.isDeferredLog;
+ return this.deferredLog;
}
/**
@@ -571,7 +612,7 @@ public class HTableDescriptor implements WritableComparable {
*/
public void setDeferredLogFlush(final boolean isDeferredLogFlush) {
setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE);
- this.isDeferredLog = isDeferredLogFlush;
+ this.deferredLog = isDeferredLogFlush;
}
/**
@@ -836,11 +877,12 @@ public class HTableDescriptor implements WritableComparable {
return result;
}
- // Writable
/**
* INTERNAL This method is a part of {@link WritableComparable} interface
* and is used for de-serialization of the HTableDescriptor over RPC
+ * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
*/
+ @Deprecated
@Override
public void readFields(DataInput in) throws IOException {
int version = in.readInt();
@@ -875,7 +917,9 @@ public class HTableDescriptor implements WritableComparable {
/**
* INTERNAL This method is a part of {@link WritableComparable} interface
* and is used for serialization of the HTableDescriptor over RPC
+ * @deprecated Writables are going away. Use pb {@link #toByteArray()} instead.
*/
+ @Deprecated
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(TABLE_DESCRIPTOR_VERSION);
@@ -1198,4 +1242,71 @@ public class HTableDescriptor implements WritableComparable {
// .META. and -ROOT- should return system user as owner, not null (see MasterFileSystem.java:bootstrap()).
return null;
}
-}
+
+ /**
+ * @return This instance serialized with pb with pb magic prefix
+ * @see {@link #parseFrom(byte[])}
+ */
+ public byte [] toByteArray() {
+ return ProtobufUtil.prependPBMagic(convert().toByteArray());
+ }
+
+ /**
+ * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
+ * @return An instance of {@link HTableDescriptor} made from bytes
+ * @throws DeserializationException
+ * @throws IOException
+ * @see {@link #toByteArray()}
+ */
+ public static HTableDescriptor parseFrom(final byte [] bytes)
+ throws DeserializationException, IOException {
+ if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+ return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
+ }
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ TableSchema ts = null;
+ try {
+ ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return convert(ts);
+ }
+
+ /**
+ * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
+ */
+ TableSchema convert() {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.setName(ByteString.copyFrom(getName()));
+ for (Map.Entry e: this.values.entrySet()) {
+ TableSchema.Attribute.Builder aBuilder = TableSchema.Attribute.newBuilder();
+ aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
+ aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+ builder.addAttributes(aBuilder.build());
+ }
+ for (HColumnDescriptor hcd: getColumnFamilies()) {
+ builder.addColumnFamilies(hcd.convert());
+ }
+ return builder.build();
+ }
+
+ /**
+ * @param ts A pb TableSchema instance.
+ * @return An {@link HTableDescriptor} made from the passed in pb ts.
+ */
+ static HTableDescriptor convert(final TableSchema ts) {
+ List list = ts.getColumnFamiliesList();
+ HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
+ int index = 0;
+ for (ColumnFamilySchema cfs: list) {
+ hcds[index++] = HColumnDescriptor.convert(cfs);
+ }
+ HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds);
+ for (TableSchema.Attribute a: ts.getAttributesList()) {
+ htd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+ }
+ return htd;
+ }
+}
\ No newline at end of file
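A hedged sketch (not part of the patch) exercising the TableSchema round trip; parseFrom also accepts old Writable-serialized bytes when the pb magic prefix is missing. addFamily and getFamilies are assumed from the existing public API.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableDescriptorPbRoundTrip {
  public static void main(String[] args) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("t"));
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
    byte[] pb = htd.toByteArray();                      // pb magic prefix + TableSchema
    HTableDescriptor copy = HTableDescriptor.parseFrom(pb);
    System.out.println(copy.getFamilies().size());      // families ride along as ColumnFamilySchemas
    System.out.println(htd.equals(copy));               // table attributes are carried as well
  }
}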
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 5cac9af..07334f8 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -83,7 +83,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.zookeeper.ClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
@@ -637,7 +637,7 @@ public class HConnectionManager {
ZooKeeperKeepAliveConnection zkw = null;
try {
zkw = getKeepAliveZooKeeperWatcher();
- this.clusterId = ClusterId.readClusterIdZNode(zkw);
+ this.clusterId = ZKClusterId.readClusterIdZNode(zkw);
if (clusterId == null) {
LOG.info("ClusterId read in ZooKeeper is null");
}
diff --git a/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index 6360059..05f96c6 100644
--- a/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -19,9 +19,11 @@
*/
package org.apache.hadoop.hbase.io;
+import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -29,10 +31,13 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.Writable;
+import com.google.protobuf.ByteString;
+
/**
* A reference to the top or bottom half of a store file. The file referenced
* lives under a different region. References are made at region split time.
@@ -58,7 +63,7 @@ public class Reference implements Writable {
* For split HStoreFiles, it specifies if the file covers the lower half or
* the upper half of the key range
*/
- public static enum Range {
+ static enum Range {
/** HStoreFile contains upper half of key range */
top,
/** HStoreFile contains lower half of key range */
@@ -66,19 +71,36 @@ public class Reference implements Writable {
}
/**
+ * @param splitRow Row we are splitting around
+ * @return A {@link Reference} that points at the top half of an hfile
+ */
+ public static Reference createTopReference(final byte [] splitRow) {
+ return new Reference(splitRow, Range.top);
+ }
+
+ /**
+ * @param splitRow Row we are splitting around
+ * @return A {@link Reference} that points at the bottom half of an hfile
+ */
+ public static Reference createBottomReference(final byte [] splitRow) {
+ return new Reference(splitRow, Range.bottom);
+ }
+
+ /**
* Constructor
* @param splitRow This is row we are splitting around.
* @param fr
*/
- public Reference(final byte [] splitRow, final Range fr) {
- this.splitkey = splitRow == null?
- null: KeyValue.createFirstOnRow(splitRow).getKey();
+ Reference(final byte [] splitRow, final Range fr) {
+ this.splitkey = splitRow == null? null: KeyValue.createFirstOnRow(splitRow).getKey();
this.region = fr;
}
/**
* Used by serializations.
+ * @deprecated Use the pb serializations instead. Writables are going away.
*/
+ @Deprecated
public Reference() {
this(null, Range.bottom);
}
@@ -106,14 +128,20 @@ public class Reference implements Writable {
return "" + this.region;
}
- // Make it serializable.
-
+ /**
+ * @deprecated Writables are going away. Use the pb serialization methods instead.
+ */
+ @Deprecated
public void write(DataOutput out) throws IOException {
// Write true if we're doing top of the file.
out.writeBoolean(isTopFileRegion(this.region));
Bytes.writeByteArray(out, this.splitkey);
}
+ /**
+ * @deprecated Writables are going away. Use the pb serialization methods instead.
+ */
+ @Deprecated
public void readFields(DataInput in) throws IOException {
boolean tmp = in.readBoolean();
// If true, set region to top.
@@ -129,7 +157,7 @@ public class Reference implements Writable {
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
- write(out);
+ out.write(toDelimitedByteArray());
} finally {
out.close();
}
@@ -147,11 +175,61 @@ public class Reference implements Writable {
throws IOException {
FSDataInputStream in = fs.open(p);
try {
+ return parseFrom(in);
+ } finally {
+ in.close();
+ }
+ }
+
+ FSProtos.Reference convert() {
+ FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder();
+ builder.setRange(isTopFileRegion(getFileRegion())?
+ FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM);
+ builder.setSplitkey(ByteString.copyFrom(getSplitKey()));
+ return builder.build();
+ }
+
+ static Reference convert(final FSProtos.Reference r) {
+ return new Reference(r.getSplitkey().toByteArray(),
+ r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom);
+ }
+
+ /**
+ * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use
+ * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+ * @throws IOException
+ * @see {@link #toByteArray()}
+ */
+ byte [] toDelimitedByteArray() throws IOException {
+ return ProtobufUtil.toDelimitedByteArray(convert());
+ }
+
+ /**
+ * Parses an {@link Reference} instance from the passed in stream. Presumes the
+ * Reference was serialized to the stream with {@link #toDelimitedByteArray()}
+ * @param in Stream to read the Reference from
+ * @return An instance of Reference.
+ * @throws IOException
+ */
+ static Reference parseFrom(final FSDataInputStream in) throws IOException {
+ // I need to be able to move back in the stream if this is not a pb serialization so I can
+ // do the Writable decoding instead.
+ InputStream is = in.markSupported()? in: new BufferedInputStream(in);
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ is.mark(pblen);
+ byte [] pbuf = new byte[pblen];
+ int read = is.read(pbuf);
+ if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
+ if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
+ return convert(FSProtos.Reference.parseDelimitedFrom(is));
+ } else {
+ // Presume Writables. Need to reset the stream since it didn't start w/ pb.
+ // We won't bother rewriting the Reference as a pb since Reference is transitory.
+ in.reset();
Reference r = new Reference();
r.readFields(in);
return r;
- } finally {
- in.close();
}
}
-}
+}
\ No newline at end of file
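A sketch (not part of the patch) of the Reference flow: write() now emits a delimited pb with the magic prefix, and read() (via parseFrom above) probes the prefix and falls back to Writable decoding for old files. The write(FileSystem, Path) and read(FileSystem, Path) signatures and the scratch path are assumptions, not shown in this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class ReferencePbRoundTrip {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path(System.getProperty("java.io.tmpdir"), "ref.pb"); // hypothetical scratch file
    Reference top = Reference.createTopReference(Bytes.toBytes("splitrow"));
    top.write(fs, p);                        // writes toDelimitedByteArray(), magic-prefixed
    Reference back = Reference.read(fs, p);  // pb first; old Writable-format files still readable
    System.out.println(back.toString());     // the recovered top reference
  }
}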
diff --git a/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 9e4ada9..e581d45 100644
--- a/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ServerCallable;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
@@ -93,10 +92,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LoadIncrementalHFiles extends Configured implements Tool {
-
private static Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
- private static final int TABLE_CREATE_MAX_RETRIES = 20;
- private static final long TABLE_CREATE_SLEEP = 60000;
static AtomicLong regionCount = new AtomicLong(0);
private HBaseAdmin hbAdmin;
private Configuration cfg;
@@ -519,8 +515,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
Path bottomOut, Path topOut) throws IOException
{
// Open reader with no block cache, and not in-memory
- Reference topReference = new Reference(splitKey, Range.top);
- Reference bottomReference = new Reference(splitKey, Range.bottom);
+ Reference topReference = Reference.createTopReference(splitKey);
+ Reference bottomReference = Reference.createBottomReference(splitKey);
copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 947ec5f..0069c11 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -105,7 +105,7 @@ import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.hbase.zookeeper.ClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
@@ -547,7 +547,7 @@ Server {
// publish cluster ID
status.setStatus("Publishing Cluster ID in ZooKeeper");
- ClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+ ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.executorService = new ExecutorService(getServerName().toString());
@@ -1503,7 +1503,7 @@ Server {
}});
return new ClusterStatus(VersionInfo.getVersion(),
- this.fileSystemManager.getClusterId(),
+ this.fileSystemManager.getClusterId().toString(),
this.serverManager.getOnlineServers(),
this.serverManager.getDeadServers(),
this.serverName,
@@ -1513,7 +1513,7 @@ Server {
}
public String getClusterId() {
- return fileSystemManager.getClusterId();
+ return fileSystemManager.getClusterId().toString();
}
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5052878..5fdbaaa 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -67,7 +69,7 @@ public class MasterFileSystem {
// metrics for master
MasterMetrics metrics;
// Persisted unique cluster ID
- private String clusterId;
+ private ClusterId clusterId;
// Keep around for convenience.
private final FileSystem fs;
// Is the fileystem ok?
@@ -178,7 +180,7 @@ public class MasterFileSystem {
/**
* @return The unique identifier generated for this cluster
*/
- public String getClusterId() {
+ public ClusterId getClusterId() {
return clusterId;
}
@@ -322,8 +324,7 @@ public class MasterFileSystem {
final FileSystem fs)
throws IOException {
// If FS is in safe mode wait till out of it.
- FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
- 10 * 1000));
+ FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
// Filesystem is good. Go ahead and check for hbase.rootdir.
try {
if (!fs.exists(rd)) {
@@ -336,17 +337,22 @@ public class MasterFileSystem {
// there is one datanode it will succeed. Permission problems should have
// already been caught by mkdirs above.
FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
- 10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
- HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+ 10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+ HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
} else {
if (!fs.isDirectory(rd)) {
throw new IllegalArgumentException(rd.toString() + " is not a directory");
}
// as above
FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
- 10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
- HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+ 10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+ HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
}
+ } catch (DeserializationException de) {
+ LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
+ IOException ioe = new IOException();
+ ioe.initCause(de);
+ throw ioe;
} catch (IllegalArgumentException iae) {
LOG.fatal("Please fix invalid configuration for "
+ HConstants.HBASE_DIR + " " + rd.toString(), iae);
@@ -355,8 +361,7 @@ public class MasterFileSystem {
// Make sure cluster ID exists
if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
- FSUtils.setClusterId(fs, rd, UUID.randomUUID().toString(), c.getInt(
- HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
+ FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
}
clusterId = FSUtils.getClusterId(fs, rd);
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index ccc964e..3b7081e 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -103,6 +103,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -110,6 +111,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import com.google.protobuf.ByteString;
+import com.google.protobuf.Message;
import com.google.protobuf.ServiceException;
/**
@@ -162,7 +164,7 @@ public final class ProtobufUtil {
* @return True if passed bytes has {@link #PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes) {
- if (bytes == null || bytes.length <= PB_MAGIC.length) return false;
+ if (bytes == null || bytes.length < PB_MAGIC.length) return false;
return Bytes.compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, 0, PB_MAGIC.length) == 0;
}
@@ -288,63 +290,6 @@ public final class ProtobufUtil {
}
/**
- * Convert a RegionInfo to a HRegionInfo
- *
- * @param proto the RegionInfo to convert
- * @return the converted HRegionInfo
- */
- public static HRegionInfo toRegionInfo(final RegionInfo proto) {
- if (proto == null) return null;
- byte[] tableName = proto.getTableName().toByteArray();
- if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
- return HRegionInfo.ROOT_REGIONINFO;
- } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
- return HRegionInfo.FIRST_META_REGIONINFO;
- }
- long regionId = proto.getRegionId();
- byte[] startKey = null;
- byte[] endKey = null;
- if (proto.hasStartKey()) {
- startKey = proto.getStartKey().toByteArray();
- }
- if (proto.hasEndKey()) {
- endKey = proto.getEndKey().toByteArray();
- }
- boolean split = false;
- if (proto.hasSplit()) {
- split = proto.getSplit();
- }
- HRegionInfo hri = new HRegionInfo(tableName,
- startKey, endKey, split, regionId);
- if (proto.hasOffline()) {
- hri.setOffline(proto.getOffline());
- }
- return hri;
- }
-
- /**
- * Convert a HRegionInfo to a RegionInfo
- *
- * @param info the HRegionInfo to convert
- * @return the converted RegionInfo
- */
- public static RegionInfo toRegionInfo(final HRegionInfo info) {
- if (info == null) return null;
- RegionInfo.Builder builder = RegionInfo.newBuilder();
- builder.setTableName(ByteString.copyFrom(info.getTableName()));
- builder.setRegionId(info.getRegionId());
- if (info.getStartKey() != null) {
- builder.setStartKey(ByteString.copyFrom(info.getStartKey()));
- }
- if (info.getEndKey() != null) {
- builder.setEndKey(ByteString.copyFrom(info.getEndKey()));
- }
- builder.setOffline(info.isOffline());
- builder.setSplit(info.isSplit());
- return builder.build();
- }
-
- /**
* Convert a protocol buffer Get to a client Get
*
* @param get the protocol buffer Get to convert
@@ -1234,7 +1179,7 @@ public final class ProtobufUtil {
RequestConverter.buildGetRegionInfoRequest(regionName);
GetRegionInfoResponse response =
admin.getRegionInfo(null, request);
- return toRegionInfo(response.getRegionInfo());
+ return HRegionInfo.convert(response.getRegionInfo());
} catch (ServiceException se) {
throw getRemoteException(se);
}
@@ -1349,23 +1294,30 @@ public final class ProtobufUtil {
* @return a list of online region info
* @throws IOException
*/
- public static List getOnlineRegions(
- final AdminProtocol admin) throws IOException {
+ public static List getOnlineRegions(final AdminProtocol admin) throws IOException {
GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest();
- List regions = null;
+ GetOnlineRegionResponse response = null;
try {
- GetOnlineRegionResponse response =
- admin.getOnlineRegion(null, request);
- regions = new ArrayList();
- if (response != null) { // it can be null only mockup testing region sever
- for (RegionInfo regionInfo: response.getRegionInfoList()) {
- regions.add(toRegionInfo(regionInfo));
- }
- }
- return regions;
+ response = admin.getOnlineRegion(null, request);
} catch (ServiceException se) {
throw getRemoteException(se);
}
+ return getRegionInfos(response);
+ }
+
+ /**
+ * Get the list of region info from a GetOnlineRegionResponse
+ *
+ * @param proto the GetOnlineRegionResponse
+ * @return the list of region info or null if proto is null
+ */
+ static List getRegionInfos(final GetOnlineRegionResponse proto) {
+ if (proto == null) return null;
+ List regionInfos = new ArrayList();
+ for (RegionInfo regionInfo: proto.getRegionInfoList()) {
+ regionInfos.add(HRegionInfo.convert(regionInfo));
+ }
+ return regionInfos;
}
/**
@@ -1459,4 +1411,16 @@ public final class ProtobufUtil {
return rl.getReadRequestsCount() + rl.getWriteRequestsCount();
}
+
+
+ /**
+ * @param m Message to get delimited pb serialization of (with pb magic prefix)
+ */
+ public static byte [] toDelimitedByteArray(final Message m) throws IOException {
+ // Allocate an arbitrarily big size so we avoid resizing.
+ ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
+ m.writeDelimitedTo(baos);
+ baos.close();
+ return ProtobufUtil.prependPBMagic(baos.toByteArray());
+ }
}
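A small sketch (not part of the patch) of the prefix helpers this class provides; note the isPBMagicPrefix change above now accepts a buffer that is exactly prefix-length long.

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class PbMagicProbe {
  public static void main(String[] args) {
    byte[] payload = Bytes.toBytes("serialized-message-bytes");  // stand-in for a pb message
    byte[] prefixed = ProtobufUtil.prependPBMagic(payload);      // what the new toByteArray()s emit
    System.out.println(ProtobufUtil.isPBMagicPrefix(prefixed));            // true
    System.out.println(ProtobufUtil.isPBMagicPrefix(Bytes.toBytes("x")));  // false: no magic, too short
    // Readers strip lengthOfPBMagic() bytes before handing the remainder to a pb Builder.
    int pblen = ProtobufUtil.lengthOfPBMagic();
    System.out.println(Bytes.equals(payload, Bytes.tail(prefixed, prefixed.length - pblen)));
  }
}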
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index dabfbab..5fb87b0 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -575,7 +575,7 @@ public final class RequestConverter {
buildOpenRegionRequest(final List regions) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
for (HRegionInfo region: regions) {
- builder.addRegion(ProtobufUtil.toRegionInfo(region));
+ builder.addRegion(HRegionInfo.convert(region));
}
return builder.build();
}
@@ -601,7 +601,7 @@ public final class RequestConverter {
public static OpenRegionRequest buildOpenRegionRequest(
final HRegionInfo region, final int versionOfOfflineNode) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
- builder.addRegion(ProtobufUtil.toRegionInfo(region));
+ builder.addRegion(HRegionInfo.convert(region));
if (versionOfOfflineNode >= 0) {
builder.setVersionOfOfflineNode(versionOfOfflineNode);
}
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
index 45cb6cf..320196b 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
@@ -138,14 +138,9 @@ public final class ResponseConverter {
* @param proto the GetOnlineRegionResponse
* @return the list of region info
*/
- public static List getRegionInfos
- (final GetOnlineRegionResponse proto) {
+ public static List getRegionInfos(final GetOnlineRegionResponse proto) {
if (proto == null || proto.getRegionInfoCount() == 0) return null;
- List regionInfos = new ArrayList();
- for (RegionInfo regionInfo: proto.getRegionInfoList()) {
- regionInfos.add(ProtobufUtil.toRegionInfo(regionInfo));
- }
- return regionInfos;
+ return ProtobufUtil.getRegionInfos(proto);
}
/**
@@ -202,7 +197,7 @@ public final class ResponseConverter {
final List regions) {
GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder();
for (HRegionInfo region: regions) {
- builder.addRegionInfo(ProtobufUtil.toRegionInfo(region));
+ builder.addRegionInfo(HRegionInfo.convert(region));
}
return builder.build();
}
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterIdProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterIdProtos.java
new file mode 100644
index 0000000..aac3b80
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterIdProtos.java
@@ -0,0 +1,468 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ClusterId.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class ClusterIdProtos {
+ private ClusterIdProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface ClusterIdOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string clusterId = 1;
+ boolean hasClusterId();
+ String getClusterId();
+ }
+ public static final class ClusterId extends
+ com.google.protobuf.GeneratedMessage
+ implements ClusterIdOrBuilder {
+ // Use ClusterId.newBuilder() to construct.
+ private ClusterId(Builder builder) {
+ super(builder);
+ }
+ private ClusterId(boolean noInit) {}
+
+ private static final ClusterId defaultInstance;
+ public static ClusterId getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ClusterId getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.internal_static_ClusterId_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.internal_static_ClusterId_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required string clusterId = 1;
+ public static final int CLUSTERID_FIELD_NUMBER = 1;
+ private java.lang.Object clusterId_;
+ public boolean hasClusterId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getClusterId() {
+ java.lang.Object ref = clusterId_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ clusterId_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getClusterIdBytes() {
+ java.lang.Object ref = clusterId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ clusterId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ clusterId_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasClusterId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getClusterIdBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getClusterIdBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId other = (org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId) obj;
+
+ boolean result = true;
+ result = result && (hasClusterId() == other.hasClusterId());
+ if (hasClusterId()) {
+ result = result && getClusterId()
+ .equals(other.getClusterId());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasClusterId()) {
+ hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
+ hash = (53 * hash) + getClusterId().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.internal_static_ClusterId_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.internal_static_ClusterId_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ clusterId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId build() {
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId result = new org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.clusterId_ = clusterId_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance()) return this;
+ if (other.hasClusterId()) {
+ setClusterId(other.getClusterId());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasClusterId()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ clusterId_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required string clusterId = 1;
+ private java.lang.Object clusterId_ = "";
+ public boolean hasClusterId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getClusterId() {
+ java.lang.Object ref = clusterId_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ clusterId_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setClusterId(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ clusterId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearClusterId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ clusterId_ = getDefaultInstance().getClusterId();
+ onChanged();
+ return this;
+ }
+ void setClusterId(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000001;
+ clusterId_ = value;
+ onChanged();
+ }
+
+ // @@protoc_insertion_point(builder_scope:ClusterId)
+ }
+
+ static {
+ defaultInstance = new ClusterId(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ClusterId)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ClusterId_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ClusterId_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\017ClusterId.proto\"\036\n\tClusterId\022\021\n\tcluste" +
+ "rId\030\001 \002(\tBB\n*org.apache.hadoop.hbase.pro" +
+ "tobuf.generatedB\017ClusterIdProtosH\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_ClusterId_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_ClusterId_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ClusterId_descriptor,
+ new java.lang.String[] { "ClusterId", },
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.class,
+ org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FSProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FSProtos.java
new file mode 100644
index 0000000..79d13c1
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FSProtos.java
@@ -0,0 +1,1018 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: FS.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class FSProtos {
+ private FSProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface HBaseVersionFileContentOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string version = 1;
+ boolean hasVersion();
+ String getVersion();
+ }
+ public static final class HBaseVersionFileContent extends
+ com.google.protobuf.GeneratedMessage
+ implements HBaseVersionFileContentOrBuilder {
+ // Use HBaseVersionFileContent.newBuilder() to construct.
+ private HBaseVersionFileContent(Builder builder) {
+ super(builder);
+ }
+ private HBaseVersionFileContent(boolean noInit) {}
+
+ private static final HBaseVersionFileContent defaultInstance;
+ public static HBaseVersionFileContent getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public HBaseVersionFileContent getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_HBaseVersionFileContent_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_HBaseVersionFileContent_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required string version = 1;
+ public static final int VERSION_FIELD_NUMBER = 1;
+ private java.lang.Object version_;
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getVersion() {
+ java.lang.Object ref = version_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ version_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getVersionBytes() {
+ java.lang.Object ref = version_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ version_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ version_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasVersion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getVersionBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getVersionBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent other = (org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent) obj;
+
+ boolean result = true;
+ result = result && (hasVersion() == other.hasVersion());
+ if (hasVersion()) {
+ result = result && getVersion()
+ .equals(other.getVersion());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasVersion()) {
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
+ hash = (53 * hash) + getVersion().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_HBaseVersionFileContent_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_HBaseVersionFileContent_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ version_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent build() {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent result = new org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.version_ = version_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance()) return this;
+ if (other.hasVersion()) {
+ setVersion(other.getVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasVersion()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ version_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required string version = 1;
+ private java.lang.Object version_ = "";
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getVersion() {
+ java.lang.Object ref = version_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ version_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setVersion(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = getDefaultInstance().getVersion();
+ onChanged();
+ return this;
+ }
+ void setVersion(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ }
+
+ // @@protoc_insertion_point(builder_scope:HBaseVersionFileContent)
+ }
+
+ static {
+ defaultInstance = new HBaseVersionFileContent(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:HBaseVersionFileContent)
+ }
+
+ public interface ReferenceOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes splitkey = 1;
+ boolean hasSplitkey();
+ com.google.protobuf.ByteString getSplitkey();
+
+ // required .Reference.Range range = 2;
+ boolean hasRange();
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range getRange();
+ }
+ public static final class Reference extends
+ com.google.protobuf.GeneratedMessage
+ implements ReferenceOrBuilder {
+ // Use Reference.newBuilder() to construct.
+ private Reference(Builder builder) {
+ super(builder);
+ }
+ private Reference(boolean noInit) {}
+
+ private static final Reference defaultInstance;
+ public static Reference getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Reference getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_Reference_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_Reference_fieldAccessorTable;
+ }
+
+ public enum Range
+ implements com.google.protobuf.ProtocolMessageEnum {
+ TOP(0, 0),
+ BOTTOM(1, 1),
+ ;
+
+ public static final int TOP_VALUE = 0;
+ public static final int BOTTOM_VALUE = 1;
+
+
+ public final int getNumber() { return value; }
+
+ public static Range valueOf(int value) {
+ switch (value) {
+ case 0: return TOP;
+ case 1: return BOTTOM;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Range>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<Range>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Range>() {
+ public Range findValueByNumber(int number) {
+ return Range.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Range[] VALUES = {
+ TOP, BOTTOM,
+ };
+
+ public static Range valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Range(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:Reference.Range)
+ }
+
+ private int bitField0_;
+ // required bytes splitkey = 1;
+ public static final int SPLITKEY_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString splitkey_;
+ public boolean hasSplitkey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getSplitkey() {
+ return splitkey_;
+ }
+
+ // required .Reference.Range range = 2;
+ public static final int RANGE_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range range_;
+ public boolean hasRange() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range getRange() {
+ return range_;
+ }
+
+ private void initFields() {
+ splitkey_ = com.google.protobuf.ByteString.EMPTY;
+ range_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range.TOP;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasSplitkey()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRange()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, splitkey_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeEnum(2, range_.getNumber());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, splitkey_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(2, range_.getNumber());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference other = (org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference) obj;
+
+ boolean result = true;
+ result = result && (hasSplitkey() == other.hasSplitkey());
+ if (hasSplitkey()) {
+ result = result && getSplitkey()
+ .equals(other.getSplitkey());
+ }
+ result = result && (hasRange() == other.hasRange());
+ if (hasRange()) {
+ result = result &&
+ (getRange() == other.getRange());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSplitkey()) {
+ hash = (37 * hash) + SPLITKEY_FIELD_NUMBER;
+ hash = (53 * hash) + getSplitkey().hashCode();
+ }
+ if (hasRange()) {
+ hash = (37 * hash) + RANGE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getRange());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_Reference_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.internal_static_Reference_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ splitkey_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ range_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range.TOP;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference build() {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference result = new org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.splitkey_ = splitkey_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.range_ = range_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance()) return this;
+ if (other.hasSplitkey()) {
+ setSplitkey(other.getSplitkey());
+ }
+ if (other.hasRange()) {
+ setRange(other.getRange());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasSplitkey()) {
+
+ return false;
+ }
+ if (!hasRange()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ splitkey_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range value = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(2, rawValue);
+ } else {
+ bitField0_ |= 0x00000002;
+ range_ = value;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes splitkey = 1;
+ private com.google.protobuf.ByteString splitkey_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasSplitkey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getSplitkey() {
+ return splitkey_;
+ }
+ public Builder setSplitkey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ splitkey_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearSplitkey() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ splitkey_ = getDefaultInstance().getSplitkey();
+ onChanged();
+ return this;
+ }
+
+ // required .Reference.Range range = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range range_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range.TOP;
+ public boolean hasRange() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range getRange() {
+ return range_;
+ }
+ public Builder setRange(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ range_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearRange() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ range_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Range.TOP;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:Reference)
+ }
+
+ static {
+ defaultInstance = new Reference(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:Reference)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_HBaseVersionFileContent_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_HBaseVersionFileContent_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_Reference_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_Reference_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\010FS.proto\"*\n\027HBaseVersionFileContent\022\017\n" +
+ "\007version\030\001 \002(\t\"\\\n\tReference\022\020\n\010splitkey\030" +
+ "\001 \002(\014\022\037\n\005range\030\002 \002(\0162\020.Reference.Range\"\034" +
+ "\n\005Range\022\007\n\003TOP\020\000\022\n\n\006BOTTOM\020\001B;\n*org.apac" +
+ "he.hadoop.hbase.protobuf.generatedB\010FSPr" +
+ "otosH\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_HBaseVersionFileContent_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_HBaseVersionFileContent_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_HBaseVersionFileContent_descriptor,
+ new java.lang.String[] { "Version", },
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.class,
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder.class);
+ internal_static_Reference_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_Reference_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_Reference_descriptor,
+ new java.lang.String[] { "Splitkey", "Range", },
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.class,
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 058c006..5eff2eb 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -89,6 +89,2553 @@ public final class HBaseProtos {
// @@protoc_insertion_point(enum_scope:KeyType)
}
+ public interface TableSchemaOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bytes name = 1;
+ boolean hasName();
+ com.google.protobuf.ByteString getName();
+
+ // repeated .TableSchema.Attribute attributes = 2;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute>
+ getAttributesList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute getAttributes(int index);
+ int getAttributesCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder>
+ getAttributesOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index);
+
+ // repeated .ColumnFamilySchema columnFamilies = 3;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema>
+ getColumnFamiliesList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index);
+ int getColumnFamiliesCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+ getColumnFamiliesOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder(
+ int index);
+ }
+ public static final class TableSchema extends
+ com.google.protobuf.GeneratedMessage
+ implements TableSchemaOrBuilder {
+ // Use TableSchema.newBuilder() to construct.
+ private TableSchema(Builder builder) {
+ super(builder);
+ }
+ private TableSchema(boolean noInit) {}
+
+ private static final TableSchema defaultInstance;
+ public static TableSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable;
+ }
+
+ public interface AttributeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes name = 1;
+ boolean hasName();
+ com.google.protobuf.ByteString getName();
+
+ // required bytes value = 2;
+ boolean hasValue();
+ com.google.protobuf.ByteString getValue();
+ }
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage
+ implements AttributeOrBuilder {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute(Builder builder) {
+ super(builder);
+ }
+ private Attribute(boolean noInit) {}
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_Attribute_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString name_;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+
+ // required bytes value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString value_;
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute) obj;
+
+ boolean result = true;
+ result = result && (hasName() == other.hasName());
+ if (hasName()) {
+ result = result && getName()
+ .equals(other.getName());
+ }
+ result = result && (hasValue() == other.hasValue());
+ if (hasValue()) {
+ result = result && getValue()
+ .equals(other.getValue());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasName()) {
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ }
+ if (hasValue()) {
+ hash = (37 * hash) + VALUE_FIELD_NUMBER;
+ hash = (53 * hash) + getValue().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_Attribute_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ if (!hasValue()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes name = 1;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+
+ // required bytes value = 2;
+ private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+ public Builder setValue(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TableSchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TableSchema.Attribute)
+ }
+
+ private int bitField0_;
+ // optional bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString name_;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+
+ // repeated .TableSchema.Attribute attributes = 2;
+ public static final int ATTRIBUTES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute> attributes_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute> getAttributesList() {
+ return attributes_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder>
+ getAttributesOrBuilderList() {
+ return attributes_;
+ }
+ public int getAttributesCount() {
+ return attributes_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute getAttributes(int index) {
+ return attributes_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index) {
+ return attributes_.get(index);
+ }
+
+ // repeated .ColumnFamilySchema columnFamilies = 3;
+ public static final int COLUMNFAMILIES_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> columnFamilies_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> getColumnFamiliesList() {
+ return columnFamilies_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+ getColumnFamiliesOrBuilderList() {
+ return columnFamilies_;
+ }
+ public int getColumnFamiliesCount() {
+ return columnFamilies_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) {
+ return columnFamilies_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder(
+ int index) {
+ return columnFamilies_.get(index);
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ attributes_ = java.util.Collections.emptyList();
+ columnFamilies_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getAttributesCount(); i++) {
+ if (!getAttributes(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getColumnFamiliesCount(); i++) {
+ if (!getColumnFamilies(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, name_);
+ }
+ for (int i = 0; i < attributes_.size(); i++) {
+ output.writeMessage(2, attributes_.get(i));
+ }
+ for (int i = 0; i < columnFamilies_.size(); i++) {
+ output.writeMessage(3, columnFamilies_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, name_);
+ }
+ for (int i = 0; i < attributes_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, attributes_.get(i));
+ }
+ for (int i = 0; i < columnFamilies_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, columnFamilies_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) obj;
+
+ boolean result = true;
+ result = result && (hasName() == other.hasName());
+ if (hasName()) {
+ result = result && getName()
+ .equals(other.getName());
+ }
+ result = result && getAttributesList()
+ .equals(other.getAttributesList());
+ result = result && getColumnFamiliesList()
+ .equals(other.getColumnFamiliesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasName()) {
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ }
+ if (getAttributesCount() > 0) {
+ hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER;
+ hash = (53 * hash) + getAttributesList().hashCode();
+ }
+ if (getColumnFamiliesCount() > 0) {
+ hash = (37 * hash) + COLUMNFAMILIES_FIELD_NUMBER;
+ hash = (53 * hash) + getColumnFamiliesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getAttributesFieldBuilder();
+ getColumnFamiliesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (attributesBuilder_ == null) {
+ attributes_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ attributesBuilder_.clear();
+ }
+ if (columnFamiliesBuilder_ == null) {
+ columnFamilies_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ columnFamiliesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (attributesBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ attributes_ = java.util.Collections.unmodifiableList(attributes_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.attributes_ = attributes_;
+ } else {
+ result.attributes_ = attributesBuilder_.build();
+ }
+ if (columnFamiliesBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ columnFamilies_ = java.util.Collections.unmodifiableList(columnFamilies_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.columnFamilies_ = columnFamilies_;
+ } else {
+ result.columnFamilies_ = columnFamiliesBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (attributesBuilder_ == null) {
+ if (!other.attributes_.isEmpty()) {
+ if (attributes_.isEmpty()) {
+ attributes_ = other.attributes_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureAttributesIsMutable();
+ attributes_.addAll(other.attributes_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.attributes_.isEmpty()) {
+ if (attributesBuilder_.isEmpty()) {
+ attributesBuilder_.dispose();
+ attributesBuilder_ = null;
+ attributes_ = other.attributes_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ attributesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAttributesFieldBuilder() : null;
+ } else {
+ attributesBuilder_.addAllMessages(other.attributes_);
+ }
+ }
+ }
+ if (columnFamiliesBuilder_ == null) {
+ if (!other.columnFamilies_.isEmpty()) {
+ if (columnFamilies_.isEmpty()) {
+ columnFamilies_ = other.columnFamilies_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.addAll(other.columnFamilies_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.columnFamilies_.isEmpty()) {
+ if (columnFamiliesBuilder_.isEmpty()) {
+ columnFamiliesBuilder_.dispose();
+ columnFamiliesBuilder_ = null;
+ columnFamilies_ = other.columnFamilies_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ columnFamiliesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getColumnFamiliesFieldBuilder() : null;
+ } else {
+ columnFamiliesBuilder_.addAllMessages(other.columnFamilies_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getAttributesCount(); i++) {
+ if (!getAttributes(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getColumnFamiliesCount(); i++) {
+ if (!getColumnFamilies(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttributes(subBuilder.buildPartial());
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addColumnFamilies(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // optional bytes name = 1;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+
+ // repeated .TableSchema.Attribute attributes = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute> attributes_ =
+ java.util.Collections.emptyList();
+ private void ensureAttributesIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ attributes_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute>(attributes_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder> attributesBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute> getAttributesList() {
+ if (attributesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(attributes_);
+ } else {
+ return attributesBuilder_.getMessageList();
+ }
+ }
+ public int getAttributesCount() {
+ if (attributesBuilder_ == null) {
+ return attributes_.size();
+ } else {
+ return attributesBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute getAttributes(int index) {
+ if (attributesBuilder_ == null) {
+ return attributes_.get(index);
+ } else {
+ return attributesBuilder_.getMessage(index);
+ }
+ }
+ public Builder setAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.set(index, value);
+ onChanged();
+ } else {
+ attributesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.add(value);
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.add(index, value);
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.add(builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllAttributes(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute> values) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ super.addAll(values, attributes_);
+ onChanged();
+ } else {
+ attributesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearAttributes() {
+ if (attributesBuilder_ == null) {
+ attributes_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ attributesBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeAttributes(int index) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.remove(index);
+ onChanged();
+ } else {
+ attributesBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder getAttributesBuilder(
+ int index) {
+ return getAttributesFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index) {
+ if (attributesBuilder_ == null) {
+ return attributes_.get(index); } else {
+ return attributesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder>
+ getAttributesOrBuilderList() {
+ if (attributesBuilder_ != null) {
+ return attributesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(attributes_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder addAttributesBuilder() {
+ return getAttributesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder addAttributesBuilder(
+ int index) {
+ return getAttributesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder>
+ getAttributesBuilderList() {
+ return getAttributesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder>
+ getAttributesFieldBuilder() {
+ if (attributesBuilder_ == null) {
+ attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.AttributeOrBuilder>(
+ attributes_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ attributes_ = null;
+ }
+ return attributesBuilder_;
+ }
+
+ // repeated .ColumnFamilySchema columnFamilies = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> columnFamilies_ =
+ java.util.Collections.emptyList();
+ private void ensureColumnFamiliesIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ columnFamilies_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema>(columnFamilies_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnFamiliesBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> getColumnFamiliesList() {
+ if (columnFamiliesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(columnFamilies_);
+ } else {
+ return columnFamiliesBuilder_.getMessageList();
+ }
+ }
+ public int getColumnFamiliesCount() {
+ if (columnFamiliesBuilder_ == null) {
+ return columnFamilies_.size();
+ } else {
+ return columnFamiliesBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) {
+ if (columnFamiliesBuilder_ == null) {
+ return columnFamilies_.get(index);
+ } else {
+ return columnFamiliesBuilder_.getMessage(index);
+ }
+ }
+ public Builder setColumnFamilies(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) {
+ if (columnFamiliesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.set(index, value);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setColumnFamilies(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) {
+ if (columnFamiliesBuilder_ == null) {
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addColumnFamilies(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) {
+ if (columnFamiliesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.add(value);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addColumnFamilies(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) {
+ if (columnFamiliesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.add(index, value);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addColumnFamilies(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) {
+ if (columnFamiliesBuilder_ == null) {
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.add(builderForValue.build());
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addColumnFamilies(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) {
+ if (columnFamiliesBuilder_ == null) {
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllColumnFamilies(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> values) {
+ if (columnFamiliesBuilder_ == null) {
+ ensureColumnFamiliesIsMutable();
+ super.addAll(values, columnFamilies_);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearColumnFamilies() {
+ if (columnFamiliesBuilder_ == null) {
+ columnFamilies_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeColumnFamilies(int index) {
+ if (columnFamiliesBuilder_ == null) {
+ ensureColumnFamiliesIsMutable();
+ columnFamilies_.remove(index);
+ onChanged();
+ } else {
+ columnFamiliesBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnFamiliesBuilder(
+ int index) {
+ return getColumnFamiliesFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder(
+ int index) {
+ if (columnFamiliesBuilder_ == null) {
+ return columnFamilies_.get(index); } else {
+ return columnFamiliesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+ getColumnFamiliesOrBuilderList() {
+ if (columnFamiliesBuilder_ != null) {
+ return columnFamiliesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(columnFamilies_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder() {
+ return getColumnFamiliesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder(
+ int index) {
+ return getColumnFamiliesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder>
+ getColumnFamiliesBuilderList() {
+ return getColumnFamiliesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+ getColumnFamiliesFieldBuilder() {
+ if (columnFamiliesBuilder_ == null) {
+ columnFamiliesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>(
+ columnFamilies_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ columnFamilies_ = null;
+ }
+ return columnFamiliesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TableSchema)
+ }
+
+ static {
+ defaultInstance = new TableSchema(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TableSchema)
+ }
+
+ public interface ColumnFamilySchemaOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes name = 1;
+ boolean hasName();
+ com.google.protobuf.ByteString getName();
+
+ // repeated .ColumnFamilySchema.Attribute attributes = 2;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute>
+ getAttributesList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute getAttributes(int index);
+ int getAttributesCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder>
+ getAttributesOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index);
+ }
+ public static final class ColumnFamilySchema extends
+ com.google.protobuf.GeneratedMessage
+ implements ColumnFamilySchemaOrBuilder {
+ // Use ColumnFamilySchema.newBuilder() to construct.
+ private ColumnFamilySchema(Builder builder) {
+ super(builder);
+ }
+ private ColumnFamilySchema(boolean noInit) {}
+
+ private static final ColumnFamilySchema defaultInstance;
+ public static ColumnFamilySchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ColumnFamilySchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable;
+ }
+
+ public interface AttributeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes name = 1;
+ boolean hasName();
+ com.google.protobuf.ByteString getName();
+
+ // required bytes value = 2;
+ boolean hasValue();
+ com.google.protobuf.ByteString getValue();
+ }
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage
+ implements AttributeOrBuilder {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute(Builder builder) {
+ super(builder);
+ }
+ private Attribute(boolean noInit) {}
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_Attribute_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString name_;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+
+ // required bytes value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString value_;
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute) obj;
+
+ boolean result = true;
+ result = result && (hasName() == other.hasName());
+ if (hasName()) {
+ result = result && getName()
+ .equals(other.getName());
+ }
+ result = result && (hasValue() == other.hasValue());
+ if (hasValue()) {
+ result = result && getValue()
+ .equals(other.getValue());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasName()) {
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ }
+ if (hasValue()) {
+ hash = (37 * hash) + VALUE_FIELD_NUMBER;
+ hash = (53 * hash) + getValue().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_Attribute_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ if (!hasValue()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes name = 1;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+
+ // required bytes value = 2;
+ private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+ public Builder setValue(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ColumnFamilySchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ColumnFamilySchema.Attribute)
+ }
+
+ private int bitField0_;
+ // required bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString name_;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+
+ // repeated .ColumnFamilySchema.Attribute attributes = 2;
+ public static final int ATTRIBUTES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute> attributes_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute> getAttributesList() {
+ return attributes_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder>
+ getAttributesOrBuilderList() {
+ return attributes_;
+ }
+ public int getAttributesCount() {
+ return attributes_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute getAttributes(int index) {
+ return attributes_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index) {
+ return attributes_.get(index);
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ attributes_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getAttributesCount(); i++) {
+ if (!getAttributes(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, name_);
+ }
+ for (int i = 0; i < attributes_.size(); i++) {
+ output.writeMessage(2, attributes_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, name_);
+ }
+ for (int i = 0; i < attributes_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, attributes_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) obj;
+
+ boolean result = true;
+ result = result && (hasName() == other.hasName());
+ if (hasName()) {
+ result = result && getName()
+ .equals(other.getName());
+ }
+ result = result && getAttributesList()
+ .equals(other.getAttributesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasName()) {
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ }
+ if (getAttributesCount() > 0) {
+ hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER;
+ hash = (53 * hash) + getAttributesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getAttributesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (attributesBuilder_ == null) {
+ attributes_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ attributesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (attributesBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ attributes_ = java.util.Collections.unmodifiableList(attributes_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.attributes_ = attributes_;
+ } else {
+ result.attributes_ = attributesBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (attributesBuilder_ == null) {
+ if (!other.attributes_.isEmpty()) {
+ if (attributes_.isEmpty()) {
+ attributes_ = other.attributes_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureAttributesIsMutable();
+ attributes_.addAll(other.attributes_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.attributes_.isEmpty()) {
+ if (attributesBuilder_.isEmpty()) {
+ attributesBuilder_.dispose();
+ attributesBuilder_ = null;
+ attributes_ = other.attributes_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ attributesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAttributesFieldBuilder() : null;
+ } else {
+ attributesBuilder_.addAllMessages(other.attributes_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ for (int i = 0; i < getAttributesCount(); i++) {
+ if (!getAttributes(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttributes(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes name = 1;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+
+ // repeated .ColumnFamilySchema.Attribute attributes = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute> attributes_ =
+ java.util.Collections.emptyList();
+ private void ensureAttributesIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ attributes_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute>(attributes_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder> attributesBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute> getAttributesList() {
+ if (attributesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(attributes_);
+ } else {
+ return attributesBuilder_.getMessageList();
+ }
+ }
+ public int getAttributesCount() {
+ if (attributesBuilder_ == null) {
+ return attributes_.size();
+ } else {
+ return attributesBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute getAttributes(int index) {
+ if (attributesBuilder_ == null) {
+ return attributes_.get(index);
+ } else {
+ return attributesBuilder_.getMessage(index);
+ }
+ }
+ public Builder setAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.set(index, value);
+ onChanged();
+ } else {
+ attributesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.add(value);
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute value) {
+ if (attributesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttributesIsMutable();
+ attributes_.add(index, value);
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.add(builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAttributes(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder builderForValue) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ attributesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllAttributes(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute> values) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ super.addAll(values, attributes_);
+ onChanged();
+ } else {
+ attributesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearAttributes() {
+ if (attributesBuilder_ == null) {
+ attributes_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ attributesBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeAttributes(int index) {
+ if (attributesBuilder_ == null) {
+ ensureAttributesIsMutable();
+ attributes_.remove(index);
+ onChanged();
+ } else {
+ attributesBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder getAttributesBuilder(
+ int index) {
+ return getAttributesFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder getAttributesOrBuilder(
+ int index) {
+ if (attributesBuilder_ == null) {
+ return attributes_.get(index); } else {
+ return attributesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder>
+ getAttributesOrBuilderList() {
+ if (attributesBuilder_ != null) {
+ return attributesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(attributes_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder addAttributesBuilder() {
+ return getAttributesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder addAttributesBuilder(
+ int index) {
+ return getAttributesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder>
+ getAttributesBuilderList() {
+ return getAttributesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder>
+ getAttributesFieldBuilder() {
+ if (attributesBuilder_ == null) {
+ attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.AttributeOrBuilder>(
+ attributes_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ attributes_ = null;
+ }
+ return attributesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ColumnFamilySchema)
+ }
+
+ static {
+ defaultInstance = new ColumnFamilySchema(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ColumnFamilySchema)
+ }
+
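As a rough sketch of how client code might drive the generated ColumnFamilySchema builder and parse machinery shown above (illustrative only; the family name and attribute used here are assumptions, not part of this patch):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;

    public class ColumnFamilySchemaExample {
      public static void main(String[] args) throws Exception {
        // Build a ColumnFamilySchema with one attribute, using the generated setters above.
        ColumnFamilySchema cfs = ColumnFamilySchema.newBuilder()
            .setName(ByteString.copyFromUtf8("info"))
            .addAttributes(ColumnFamilySchema.Attribute.newBuilder()
                .setName(ByteString.copyFromUtf8("VERSIONS"))
                .setValue(ByteString.copyFromUtf8("3"))
                .build())
            .build();

        // Round-trip through the wire format; parseFrom is part of the same generated class.
        byte[] bytes = cfs.toByteArray();
        ColumnFamilySchema copy = ColumnFamilySchema.parseFrom(bytes);
        System.out.println(copy.getAttributesCount()); // prints 1
      }
    }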
public interface RegionInfoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -7282,6 +9829,26 @@ public final class HBaseProtos {
}
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TableSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TableSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TableSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TableSchema_Attribute_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ColumnFamilySchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ColumnFamilySchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ColumnFamilySchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ColumnFamilySchema_Attribute_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionInfo_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -7340,50 +9907,89 @@ public final class HBaseProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\013hbase.proto\"s\n\nRegionInfo\022\020\n\010regionId\030" +
- "\001 \002(\004\022\021\n\ttableName\030\002 \002(\014\022\020\n\010startKey\030\003 \001" +
- "(\014\022\016\n\006endKey\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005s" +
- "plit\030\006 \001(\010\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001" +
- " \002(\0162$.RegionSpecifier.RegionSpecifierTy" +
- "pe\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType" +
- "\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME" +
- "\020\002\"\270\003\n\nRegionLoad\022)\n\017regionSpecifier\030\001 \002" +
- "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
- "storefiles\030\003 \001(\r\022\037\n\027storeUncompressedSiz",
- "eMB\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005 \001(\r\022\026\n\016me" +
- "mstoreSizeMB\030\006 \001(\r\022\034\n\024storefileIndexSize" +
- "MB\030\007 \001(\r\022\031\n\021readRequestsCount\030\010 \001(\004\022\032\n\022w" +
- "riteRequestsCount\030\t \001(\004\022\032\n\022totalCompacti" +
- "ngKVs\030\n \001(\004\022\033\n\023currentCompactedKVs\030\013 \001(\004" +
- "\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026totalStatic" +
- "IndexSizeKB\030\r \001(\r\022\036\n\026totalStaticBloomSiz" +
- "eKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(\0132\014.Coproc" +
- "essor\"\263\001\n\nServerLoad\022\031\n\021requestsPerSecon" +
- "d\030\001 \001(\r\022\035\n\025totalNumberOfRequests\030\002 \001(\r\022\022",
- "\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n" +
- "\013regionLoads\030\005 \003(\0132\013.RegionLoad\022\"\n\014copro" +
- "cessors\030\006 \003(\0132\014.Coprocessor\"%\n\tTimeRange" +
- "\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"w\n\010KeyValue\022\013" +
- "\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier" +
- "\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004\022\031\n\007keyType\030\005 \001" +
- "(\0162\010.KeyType\022\r\n\005value\030\006 \001(\014\"?\n\nServerNam" +
- "e\022\020\n\010hostName\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tsta" +
- "rtCode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(" +
- "\t\"-\n\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005val",
- "ue\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022" +
- "\r\n\005value\030\002 \001(\014*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007" +
- "\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021" +
- "\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.a" +
- "pache.hadoop.hbase.protobuf.generatedB\013H" +
- "BaseProtosH\001\240\001\001"
+ "\n\013hbase.proto\"\236\001\n\013TableSchema\022\014\n\004name\030\001 " +
+ "\001(\014\022*\n\nattributes\030\002 \003(\0132\026.TableSchema.At" +
+ "tribute\022+\n\016columnFamilies\030\003 \003(\0132\023.Column" +
+ "FamilySchema\032(\n\tAttribute\022\014\n\004name\030\001 \002(\014\022" +
+ "\r\n\005value\030\002 \002(\014\"\177\n\022ColumnFamilySchema\022\014\n\004" +
+ "name\030\001 \002(\014\0221\n\nattributes\030\002 \003(\0132\035.ColumnF" +
+ "amilySchema.Attribute\032(\n\tAttribute\022\014\n\004na" +
+ "me\030\001 \002(\014\022\r\n\005value\030\002 \002(\014\"s\n\nRegionInfo\022\020\n" +
+ "\010regionId\030\001 \002(\004\022\021\n\ttableName\030\002 \002(\014\022\020\n\010st" +
+ "artKey\030\003 \001(\014\022\016\n\006endKey\030\004 \001(\014\022\017\n\007offline\030",
+ "\005 \001(\010\022\r\n\005split\030\006 \001(\010\"\225\001\n\017RegionSpecifier" +
+ "\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" +
+ "pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" +
+ "cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" +
+ "EGION_NAME\020\002\"\270\003\n\nRegionLoad\022)\n\017regionSpe" +
+ "cifier\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores" +
+ "\030\002 \001(\r\022\022\n\nstorefiles\030\003 \001(\r\022\037\n\027storeUncom" +
+ "pressedSizeMB\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005" +
+ " \001(\r\022\026\n\016memstoreSizeMB\030\006 \001(\r\022\034\n\024storefil" +
+ "eIndexSizeMB\030\007 \001(\r\022\031\n\021readRequestsCount\030",
+ "\010 \001(\004\022\032\n\022writeRequestsCount\030\t \001(\004\022\032\n\022tot" +
+ "alCompactingKVs\030\n \001(\004\022\033\n\023currentCompacte" +
+ "dKVs\030\013 \001(\004\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026t" +
+ "otalStaticIndexSizeKB\030\r \001(\r\022\036\n\026totalStat" +
+ "icBloomSizeKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(" +
+ "\0132\014.Coprocessor\"\263\001\n\nServerLoad\022\031\n\021reques" +
+ "tsPerSecond\030\001 \001(\r\022\035\n\025totalNumberOfReques" +
+ "ts\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapM" +
+ "B\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.RegionLoa" +
+ "d\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\"%\n",
+ "\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"w\n\010" +
+ "KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n" +
+ "\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004\022\031\n\007k" +
+ "eyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006 \001(\014\"?\n" +
+ "\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004port\030\002 " +
+ "\001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n" +
+ "\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004name\030\001 " +
+ "\002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004n" +
+ "ame\030\001 \002(\t\022\r\n\005value\030\002 \001(\014*_\n\007KeyType\022\013\n\007M" +
+ "INIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_",
+ "COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377" +
+ "\001B>\n*org.apache.hadoop.hbase.protobuf.ge" +
+ "neratedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
- internal_static_RegionInfo_descriptor =
+ internal_static_TableSchema_descriptor =
getDescriptor().getMessageTypes().get(0);
+ internal_static_TableSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TableSchema_descriptor,
+ new java.lang.String[] { "Name", "Attributes", "ColumnFamilies", },
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.class,
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder.class);
+ internal_static_TableSchema_Attribute_descriptor =
+ internal_static_TableSchema_descriptor.getNestedTypes().get(0);
+ internal_static_TableSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TableSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.class,
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Attribute.Builder.class);
+ internal_static_ColumnFamilySchema_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_ColumnFamilySchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ColumnFamilySchema_descriptor,
+ new java.lang.String[] { "Name", "Attributes", },
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class,
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class);
+ internal_static_ColumnFamilySchema_Attribute_descriptor =
+ internal_static_ColumnFamilySchema_descriptor.getNestedTypes().get(0);
+ internal_static_ColumnFamilySchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ColumnFamilySchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.class,
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Attribute.Builder.class);
+ internal_static_RegionInfo_descriptor =
+ getDescriptor().getMessageTypes().get(2);
internal_static_RegionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionInfo_descriptor,
@@ -7391,7 +9997,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class);
internal_static_RegionSpecifier_descriptor =
- getDescriptor().getMessageTypes().get(1);
+ getDescriptor().getMessageTypes().get(3);
internal_static_RegionSpecifier_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionSpecifier_descriptor,
@@ -7399,7 +10005,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class);
internal_static_RegionLoad_descriptor =
- getDescriptor().getMessageTypes().get(2);
+ getDescriptor().getMessageTypes().get(4);
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
@@ -7407,7 +10013,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder.class);
internal_static_ServerLoad_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(5);
internal_static_ServerLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerLoad_descriptor,
@@ -7415,7 +10021,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder.class);
internal_static_TimeRange_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(6);
internal_static_TimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TimeRange_descriptor,
@@ -7423,7 +10029,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class);
internal_static_KeyValue_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(7);
internal_static_KeyValue_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_KeyValue_descriptor,
@@ -7431,7 +10037,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder.class);
internal_static_ServerName_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(8);
internal_static_ServerName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerName_descriptor,
@@ -7439,7 +10045,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class);
internal_static_Coprocessor_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(9);
internal_static_Coprocessor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Coprocessor_descriptor,
@@ -7447,7 +10053,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class);
internal_static_NameStringPair_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(10);
internal_static_NameStringPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameStringPair_descriptor,
@@ -7455,7 +10061,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class);
internal_static_NameBytesPair_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(11);
internal_static_NameBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameBytesPair_descriptor,
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 20c7738..ae86046 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -954,422 +954,6 @@ public final class ZooKeeperProtos {
// @@protoc_insertion_point(class_scope:Master)
}
- public interface ClusterIdOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string clusterId = 1;
- boolean hasClusterId();
- String getClusterId();
- }
- public static final class ClusterId extends
- com.google.protobuf.GeneratedMessage
- implements ClusterIdOrBuilder {
- // Use ClusterId.newBuilder() to construct.
- private ClusterId(Builder builder) {
- super(builder);
- }
- private ClusterId(boolean noInit) {}
-
- private static final ClusterId defaultInstance;
- public static ClusterId getDefaultInstance() {
- return defaultInstance;
- }
-
- public ClusterId getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterId_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterId_fieldAccessorTable;
- }
-
- private int bitField0_;
- // required string clusterId = 1;
- public static final int CLUSTERID_FIELD_NUMBER = 1;
- private java.lang.Object clusterId_;
- public boolean hasClusterId() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- public String getClusterId() {
- java.lang.Object ref = clusterId_;
- if (ref instanceof String) {
- return (String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
- clusterId_ = s;
- }
- return s;
- }
- }
- private com.google.protobuf.ByteString getClusterIdBytes() {
- java.lang.Object ref = clusterId_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
- clusterId_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- clusterId_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasClusterId()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getClusterIdBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getClusterIdBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- @java.lang.Override
- public boolean equals(final java.lang.Object obj) {
- if (obj == this) {
- return true;
- }
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId)) {
- return super.equals(obj);
- }
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId) obj;
-
- boolean result = true;
- result = result && (hasClusterId() == other.hasClusterId());
- if (hasClusterId()) {
- result = result && getClusterId()
- .equals(other.getClusterId());
- }
- result = result &&
- getUnknownFields().equals(other.getUnknownFields());
- return result;
- }
-
- @java.lang.Override
- public int hashCode() {
- int hash = 41;
- hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasClusterId()) {
- hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
- hash = (53 * hash) + getClusterId().hashCode();
- }
- hash = (29 * hash) + getUnknownFields().hashCode();
- return hash;
- }
-
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
- }
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterIdOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterId_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterId_fieldAccessorTable;
- }
-
- // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- clusterId_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.getDescriptor();
- }
-
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId build() {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.clusterId_ = clusterId_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.getDefaultInstance()) return this;
- if (other.hasClusterId()) {
- setClusterId(other.getClusterId());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasClusterId()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- clusterId_ = input.readBytes();
- break;
- }
- }
- }
- }
-
- private int bitField0_;
-
- // required string clusterId = 1;
- private java.lang.Object clusterId_ = "";
- public boolean hasClusterId() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- public String getClusterId() {
- java.lang.Object ref = clusterId_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
- clusterId_ = s;
- return s;
- } else {
- return (String) ref;
- }
- }
- public Builder setClusterId(String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- clusterId_ = value;
- onChanged();
- return this;
- }
- public Builder clearClusterId() {
- bitField0_ = (bitField0_ & ~0x00000001);
- clusterId_ = getDefaultInstance().getClusterId();
- onChanged();
- return this;
- }
- void setClusterId(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000001;
- clusterId_ = value;
- onChanged();
- }
-
- // @@protoc_insertion_point(builder_scope:ClusterId)
- }
-
- static {
- defaultInstance = new ClusterId(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:ClusterId)
- }
-
public interface ClusterUpOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -3657,11 +3241,6 @@ public final class ZooKeeperProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_Master_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ClusterId_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ClusterId_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
internal_static_ClusterUp_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -3693,20 +3272,19 @@ public final class ZooKeeperProtos {
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" +
"gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
"%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
- "\n\tClusterId\022\021\n\tclusterId\030\001 \002(\t\"\036\n\tCluste" +
- "rUp\022\021\n\tstartDate\030\001 \002(\t\"\211\001\n\020RegionTransit" +
- "ion\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nregionName" +
- "\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022%\n\020originServ" +
- "erName\030\004 \001(\0132\013.ServerName\022\017\n\007payload\030\005 \001" +
- "(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.Spl" +
- "itLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013.Se",
- "rverName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWN" +
- "ED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\"n\n" +
- "\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007ENA" +
- "BLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001" +
- "\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003BE\n*org.apa" +
- "che.hadoop.hbase.protobuf.generatedB\017Zoo" +
- "KeeperProtosH\001\210\001\001\240\001\001"
+ "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\211\001\n\020Regio" +
+ "nTransition\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nre" +
+ "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022%\n\020or" +
+ "iginServerName\030\004 \001(\0132\013.ServerName\022\017\n\007pay" +
+ "load\030\005 \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002" +
+ "(\0162\023.SplitLogTask.State\022\037\n\nserverName\030\002 " +
+ "\002(\0132\013.ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020",
+ "\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003" +
+ "ERR\020\004\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.St" +
+ "ate:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DI" +
+ "SABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003BE\n" +
+ "*org.apache.hadoop.hbase.protobuf.genera" +
+ "tedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3729,16 +3307,8 @@ public final class ZooKeeperProtos {
new java.lang.String[] { "Master", },
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
- internal_static_ClusterId_descriptor =
- getDescriptor().getMessageTypes().get(2);
- internal_static_ClusterId_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_ClusterId_descriptor,
- new java.lang.String[] { "ClusterId", },
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.class,
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterId.Builder.class);
internal_static_ClusterUp_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(2);
internal_static_ClusterUp_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ClusterUp_descriptor,
@@ -3746,7 +3316,7 @@ public final class ZooKeeperProtos {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
internal_static_RegionTransition_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(3);
internal_static_RegionTransition_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionTransition_descriptor,
@@ -3754,7 +3324,7 @@ public final class ZooKeeperProtos {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class);
internal_static_SplitLogTask_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(4);
internal_static_SplitLogTask_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SplitLogTask_descriptor,
@@ -3762,7 +3332,7 @@ public final class ZooKeeperProtos {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
internal_static_Table_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(5);
internal_static_Table_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Table_descriptor,
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9f16fee..cb3e603 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.UnsupportedEncodingException;
@@ -65,6 +66,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -493,10 +495,7 @@ public class HRegion implements HeapSize { // , Writable{
*/
public long initialize(final CancelableProgressable reporter)
throws IOException {
-
- MonitoredTask status = TaskMonitor.get().createStatus(
- "Initializing region " + this);
-
+ MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
long nextSeqId = -1;
try {
nextSeqId = initializeRegionInternals(reporter, status);
@@ -739,8 +738,15 @@ public class HRegion implements HeapSize { // , Writable{
*/
private void checkRegioninfoOnFilesystem() throws IOException {
Path regioninfoPath = new Path(this.regiondir, REGIONINFO_FILE);
- if (this.fs.exists(regioninfoPath) &&
- this.fs.getFileStatus(regioninfoPath).getLen() > 0) {
+ // Compose the content of the file so we can compare it to the length of the file already in
+ // the filesystem. If they differ, rewrite it (it may have been written in the old format using
+ // Writables instead of pb). The pb version is much shorter -- we now write w/o the toString
+ // version -- so checking the length alone should be sufficient; we don't want to read the file
+ // every time to check whether it is pb serialized.
+ byte [] content = getDotRegionInfoFileContent(this.getRegionInfo());
+ FileStatus status = this.fs.exists(regioninfoPath)? this.fs.getFileStatus(regioninfoPath): null;
+ if (status != null && status.getLen() == content.length) {
+ // Then assume the content is good and move on.
return;
}
// Create in tmpdir and then move into place in case we crash after
@@ -748,34 +754,59 @@ public class HRegion implements HeapSize { // , Writable{
// subsequent region reopens will fail the below because create is
// registered in NN.
- // first check to get the permissions
- FsPermission perms = FSUtils.getFilePermissions(fs, conf,
- HConstants.DATA_FILE_UMASK_KEY);
+ // First check to get the permissions
+ FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
- // and then create the file
+ // And then create the file
Path tmpPath = new Path(getTmpDir(), REGIONINFO_FILE);
-
- // if datanode crashes or if the RS goes down just before the close is called while trying to
+
+ // If datanode crashes or if the RS goes down just before the close is called while trying to
// close the created regioninfo file in the .tmp directory then on next
// creation we will be getting AlreadyCreatedException.
// Hence delete and create the file if exists.
if (FSUtils.isExists(fs, tmpPath)) {
FSUtils.delete(fs, tmpPath, true);
}
-
- FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);
+ FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);
try {
- this.regionInfo.write(out);
- out.write('\n');
- out.write('\n');
- out.write(Bytes.toBytes(this.regionInfo.toString()));
+ // We used to write out this file as a serialized Writable followed by '\n\n' and then the
+ // toString of the HRegionInfo, but now we write just the pb serialized bytes, so we can tell
+ // for sure whether the content has been pb'd simply by looking at the file length; the
+ // pb version will be shorter.
+ out.write(content);
} finally {
out.close();
}
if (!fs.rename(tmpPath, regioninfoPath)) {
- throw new IOException("Unable to rename " + tmpPath + " to " +
- regioninfoPath);
+ throw new IOException("Unable to rename " + tmpPath + " to " + regioninfoPath);
+ }
+ }
+
+ /**
+ * @param hri
+ * @return Content of the file we write out to the filesystem under a region
+ * @throws IOException
+ */
+ private static byte [] getDotRegionInfoFileContent(final HRegionInfo hri) throws IOException {
+ return hri.toDelimitedByteArray();
+ }
+
+ /**
+ * @param fs
+ * @param dir
+ * @return An HRegionInfo instance gotten from the .regioninfo file under region dir
+ * @throws IOException
+ */
+ public static HRegionInfo loadDotRegionInfoFileContent(final FileSystem fs, final Path dir)
+ throws IOException {
+ Path regioninfo = new Path(dir, HRegion.REGIONINFO_FILE);
+ if (!fs.exists(regioninfo)) throw new FileNotFoundException(regioninfo.toString());
+ FSDataInputStream in = fs.open(regioninfo);
+ try {
+ return HRegionInfo.parseFrom(in);
+ } finally {
+ in.close();
}
}
@@ -3789,10 +3820,8 @@ public class HRegion implements HeapSize { // , Writable{
if (LOG.isDebugEnabled()) {
LOG.debug("Opening region: " + info);
}
- Path dir = HTableDescriptor.getTableDir(tableDir,
- info.getTableName());
- HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
- htd, rsServices);
+ Path dir = HTableDescriptor.getTableDir(tableDir, info.getTableName());
+ HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info, htd, rsServices);
return r.openHRegion(reporter);
}
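The comments in checkRegioninfoOnFilesystem above rely on the .regioninfo file now holding only the pb-delimited bytes of the HRegionInfo, so a length comparison is enough to detect the old Writable format. Below is a minimal sketch of reading such a file back with the helper this patch adds; the configuration and region directory path are illustrative assumptions, not values from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class RegionInfoFileExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical region directory; in a real cluster this lives under the HBase root dir.
        Path regionDir = new Path("/hbase/mytable/abcdef1234567890abcdef1234567890");

        // Reads <regionDir>/.regioninfo and parses the pb-delimited HRegionInfo written by this patch.
        HRegionInfo hri = HRegion.loadDotRegionInfoFileContent(fs, regionDir);

        // The same length-based freshness check the patch performs before rewriting the file.
        long onDisk = fs.getFileStatus(new Path(regionDir, HRegion.REGIONINFO_FILE)).getLen();
        boolean upToDate = onDisk == hri.toDelimitedByteArray().length;
        System.out.println("pb-format .regioninfo up to date: " + upToDate);
      }
    }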
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6dc0517..81ba0cc 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -3330,7 +3330,7 @@ public class HRegionServer implements ClientProtocol,
HRegion region = getRegion(request.getRegion());
HRegionInfo info = region.getRegionInfo();
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
- builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
+ builder.setRegionInfo(HRegionInfo.convert(info));
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
@@ -3393,8 +3393,8 @@ public class HRegionServer implements ClientProtocol,
*/
@Override
@QosPriority(priority=HIGH_QOS)
- public OpenRegionResponse openRegion(final RpcController controller,
- final OpenRegionRequest request) throws ServiceException {
+ public OpenRegionResponse openRegion(final RpcController controller, final OpenRegionRequest request)
+ throws ServiceException {
int versionOfOfflineNode = -1;
if (request.hasVersionOfOfflineNode()) {
versionOfOfflineNode = request.getVersionOfOfflineNode();
@@ -3402,13 +3402,11 @@ public class HRegionServer implements ClientProtocol,
try {
checkOpen();
requestCount.incrementAndGet();
- OpenRegionResponse.Builder
- builder = OpenRegionResponse.newBuilder();
+ OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
Map<String, HTableDescriptor> htds =
new HashMap<String, HTableDescriptor>(request.getRegionList().size());
-
for (RegionInfo regionInfo: request.getRegionList()) {
- HRegionInfo region = ProtobufUtil.toRegionInfo(regionInfo);
+ HRegionInfo region = HRegionInfo.convert(regionInfo);
checkIfRegionInTransition(region, OPEN);
HRegion onlineRegion = getFromOnlineRegions(region.getEncodedName());
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index 6a9f2fe..7816758 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
-import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -640,10 +639,10 @@ public class SplitTransaction {
byte [] family = sf.getFamily();
String encoded = this.hri_a.getEncodedName();
Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
- StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
+ StoreFile.split(fs, storedir, sf, this.splitrow, false);
encoded = this.hri_b.getEncodedName();
storedir = Store.getStoreHomedir(splitdir, encoded, family);
- StoreFile.split(fs, storedir, sf, this.splitrow, Range.top);
+ StoreFile.split(fs, storedir, sf, this.splitrow, true);
}
/**
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 5e1e16d..fefd42a 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -876,7 +876,7 @@ public class StoreFile extends SchemaConfigured {
* SOME_DIRECTORY/REGIONNAME/FAMILY.
* @param f File to split.
* @param splitRow
- * @param range
+ * @param top True if we are referring to the top half of the hfile.
* @return Path to created reference.
* @throws IOException
*/
@@ -884,10 +884,11 @@ public class StoreFile extends SchemaConfigured {
final Path splitDir,
final StoreFile f,
final byte [] splitRow,
- final Reference.Range range)
+ final boolean top)
throws IOException {
// A reference to the bottom half of the hsf store file.
- Reference r = new Reference(splitRow, range);
+ Reference r =
+ top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_PARSER regex above. The referred-to regions name is
// up in the path of the passed in f -- parentdir is family,
diff --git a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 5050df0..2c328ea 100644
--- a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.KeeperException;
@@ -188,7 +188,7 @@ public class ReplicationSource extends Thread
this.metrics = new ReplicationSourceMetrics(peerClusterZnode);
try {
- this.clusterId = UUID.fromString(ClusterId.readClusterIdZNode(zkHelper
+ this.clusterId = UUID.fromString(ZKClusterId.readClusterIdZNode(zkHelper
.getZookeeperWatcher()));
} catch (KeeperException ke) {
throw new IOException("Could not read cluster id", ke);
@@ -250,7 +250,7 @@ public class ReplicationSource extends Thread
}
// delay this until we are in an asynchronous thread
try {
- this.peerClusterId = UUID.fromString(ClusterId
+ this.peerClusterId = UUID.fromString(ZKClusterId
.readClusterIdZNode(zkHelper.getPeerClusters().get(peerId).getZkw()));
} catch (KeeperException ke) {
this.terminate("Could not read peer's cluster id", ke);
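ReplicationSource resolves both the local and the peer cluster id through the renamed helper; a small sketch of that lookup, assuming a live ZooKeeperWatcher:

    import java.util.UUID;

    import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;

    public class ZKClusterIdSketch {
      // Read the cluster id znode and turn it into a UUID, as ReplicationSource does.
      static UUID readClusterId(ZooKeeperWatcher zkw) throws KeeperException {
        String id = ZKClusterId.readClusterIdZNode(zkw);
        return id == null ? null : UUID.fromString(id);
      }
    }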
diff --git a/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
index 049ed8d..05e13a2 100644
--- a/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.ClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -67,7 +67,7 @@ public class AuthenticationTokenSecretManager
private long tokenMaxLifetime;
private ZKSecretWatcher zkWatcher;
private LeaderElector leaderElector;
- private ClusterId clusterId;
+ private ZKClusterId clusterId;
private Map<Integer, AuthenticationKey> allKeys =
new ConcurrentHashMap<Integer, AuthenticationKey>();
@@ -96,7 +96,7 @@ public class AuthenticationTokenSecretManager
this.tokenMaxLifetime = tokenMaxLifetime;
this.leaderElector = new LeaderElector(zk, serverName);
this.name = NAME_PREFIX+serverName;
- this.clusterId = new ClusterId(zk, zk);
+ this.clusterId = new ZKClusterId(zk, zk);
}
public void start() {
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index efb2b84..4a1dac5 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -39,10 +39,13 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+
+import com.google.common.primitives.Ints;
/**
@@ -395,15 +398,25 @@ public class FSTableDescriptors implements TableDescriptors {
if (tableDir == null) throw new NullPointerException();
FileStatus status = getTableInfoPath(fs, tableDir);
if (status == null) return null;
+ int len = Ints.checkedCast(status.getLen());
+ byte [] content = new byte[len];
FSDataInputStream fsDataInputStream = fs.open(status.getPath());
- HTableDescriptor hTableDescriptor = null;
try {
- hTableDescriptor = new HTableDescriptor();
- hTableDescriptor.readFields(fsDataInputStream);
+ fsDataInputStream.readFully(content);
} finally {
fsDataInputStream.close();
}
- return hTableDescriptor;
+ HTableDescriptor htd = null;
+ try {
+ htd = HTableDescriptor.parseFrom(content);
+ } catch (DeserializationException e) {
+ throw new IOException("content=" + Bytes.toString(content), e);
+ }
+ if (!ProtobufUtil.isPBMagicPrefix(content)) {
+ // Convert the file over to be pb before leaving here.
+ createTableDescriptor(fs, tableDir.getParent(), htd, true);
+ }
+ return htd;
}
/**
@@ -451,16 +464,14 @@ public class FSTableDescriptors implements TableDescriptors {
final HTableDescriptor hTableDescriptor, final Path tableDir,
final FileStatus status)
throws IOException {
- // Get temporary dir into which we'll first write a file to avoid
- // half-written file phenomeon.
+ // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
Path tmpTableDir = new Path(tableDir, ".tmp");
// What is current sequenceid? We read the current sequenceid from
// the current file. After we read it, another thread could come in and
// compete with us writing out next version of file. The below retries
// should help in this case some but its hard to do guarantees in face of
// concurrent schema edits.
- int currentSequenceid =
- status == null? 0: getTableInfoSequenceid(status.getPath());
+ int currentSequenceid = status == null? 0: getTableInfoSequenceid(status.getPath());
int sequenceid = currentSequenceid;
// Put arbitrary upperbound on how often we retry
int retries = 10;
@@ -499,15 +510,13 @@ public class FSTableDescriptors implements TableDescriptors {
return tableInfoPath;
}
- private static void writeHTD(final FileSystem fs, final Path p,
- final HTableDescriptor htd)
+ private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
- htd.write(out);
- out.write('\n');
- out.write('\n');
- out.write(Bytes.toBytes(htd.toString()));
+ // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
+ // the toString version of HTD. Now we just write out the pb serialization.
+ out.write(htd.toByteArray());
} finally {
out.close();
}
@@ -538,8 +547,7 @@ public class FSTableDescriptors implements TableDescriptors {
final Configuration conf, boolean forceCreation)
throws IOException {
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
- return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
- forceCreation);
+ return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, forceCreation);
}
/**
@@ -569,8 +577,7 @@ public class FSTableDescriptors implements TableDescriptors {
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
throws IOException {
- FileStatus status =
- getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
+ FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
if (status != null) {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
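The new read path slurps the whole .tableinfo file and hands the bytes to the pb parser; a sketch of that flow, assuming HTableDescriptor.parseFrom also tolerates the legacy Writable bytes (which the in-place conversion above implies):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.DeserializationException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    import com.google.common.primitives.Ints;

    public class TableInfoReadSketch {
      // Read a .tableinfo file into memory, parse it, and note whether the
      // on-disk copy still needs rewriting in the pb format.
      static HTableDescriptor read(FileSystem fs, FileStatus status)
      throws IOException, DeserializationException {
        byte [] content = new byte[Ints.checkedCast(status.getLen())];
        FSDataInputStream in = fs.open(status.getPath());
        try {
          in.readFully(content);
        } finally {
          in.close();
        }
        HTableDescriptor htd = HTableDescriptor.parseFrom(content);
        // When the file is not yet pb, the patch rewrites it in place at this point.
        boolean legacyFormat = !ProtobufUtil.isPBMagicPrefix(content);
        return htd;
      }
    }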
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3d35d3e..8e49f19 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -19,10 +19,12 @@
*/
package org.apache.hadoop.hbase.util;
+import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
@@ -43,17 +45,25 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.primitives.Ints;
+import com.google.protobuf.InvalidProtocolBufferException;
+
/**
* Utility methods for interacting with the underlying file system.
*/
@@ -69,6 +79,26 @@ public abstract class FSUtils {
super();
}
+ /**
+ * We write .regioninfo and .tableinfo files as delimited pb bytes followed by two '\n's and
+ * then a human-readable String representation. Use this method to compose the file
+ * content.
+ * @param delimitedPbBytes Byte array of delimited pb serialized object with pb magic prefix.
+ * @param str String version of the data structure we have serialized.
+ * @return What to write out as file content.
+ * @throws IOException
+ */
+ public static byte [] getDotFileContent(final byte [] delimitedPbBytes, final String str)
+ throws IOException {
+ byte [] strbytes = Bytes.toBytes(str);
+ byte [] result = new byte [delimitedPbBytes.length + 2 + strbytes.length];
+ System.arraycopy(delimitedPbBytes, 0, result, 0, delimitedPbBytes.length);
+ result[delimitedPbBytes.length] = '\n';
+ result[delimitedPbBytes.length + 1] = '\n';
+ System.arraycopy(strbytes, 0, result, delimitedPbBytes.length + 2, strbytes.length);
+ return result;
+ }
+
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {
@@ -252,26 +282,75 @@ public abstract class FSUtils {
* @param rootdir root hbase directory
* @return null if no version file exists, version string otherwise.
* @throws IOException e
+ * @throws DeserializationException
*/
public static String getVersion(FileSystem fs, Path rootdir)
- throws IOException {
+ throws IOException, DeserializationException {
Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
+ FileStatus [] status = fs.listStatus(versionFile);
+ if (status == null || status.length == 0) return null;
String version = null;
- if (fs.exists(versionFile)) {
- FSDataInputStream s =
- fs.open(versionFile);
- try {
- version = DataInputStream.readUTF(s);
- } catch (EOFException eof) {
- LOG.warn("Version file was empty, odd, will try to set it.");
- } finally {
- s.close();
+ byte [] content = new byte [(int)status[0].getLen()];
+ FSDataInputStream s = fs.open(versionFile);
+ try {
+ IOUtils.readFully(s, content, 0, content.length);
+ if (ProtobufUtil.isPBMagicPrefix(content)) {
+ version = parseVersionFrom(content);
+ } else {
+ // Presume it is in the pre-pb format.
+ InputStream is = new ByteArrayInputStream(content);
+ DataInputStream dis = new DataInputStream(is);
+ try {
+ version = dis.readUTF();
+ } finally {
+ dis.close();
+ }
+ // Update the format
+ LOG.info("Updating the hbase.version file format with version=" + version);
+ setVersion(fs, rootdir, version, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
+ } catch (EOFException eof) {
+ LOG.warn("Version file was empty, odd, will try to set it.");
+ } finally {
+ s.close();
}
return version;
}
/**
+ * Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
+ * @param bytes The byte content of the hbase.version file.
+ * @return The version found in the file as a String.
+ * @throws DeserializationException
+ */
+ static String parseVersionFrom(final byte [] bytes)
+ throws DeserializationException {
+ ProtobufUtil.expectPBMagicPrefix(bytes);
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ FSProtos.HBaseVersionFileContent.Builder builder =
+ FSProtos.HBaseVersionFileContent.newBuilder();
+ FSProtos.HBaseVersionFileContent fileContent;
+ try {
+ fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ return fileContent.getVersion();
+ } catch (InvalidProtocolBufferException e) {
+ // Convert the pb exception into our DeserializationException
+ throw new DeserializationException(e);
+ }
+ }
+
+ /**
+ * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
+ * @param version Version to persist
+ * @return Serialized protobuf with version content and a bit of pb magic for a prefix.
+ */
+ static byte [] toVersionByteArray(final String version) {
+ FSProtos.HBaseVersionFileContent.Builder builder =
+ FSProtos.HBaseVersionFileContent.newBuilder();
+ return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
+ }
+
+ /**
* Verifies current version of file system
*
* @param fs file system
@@ -279,11 +358,11 @@ public abstract class FSUtils {
* @param message if true, issues a message on System.out
*
* @throws IOException e
+ * @throws DeserializationException
*/
- public static void checkVersion(FileSystem fs, Path rootdir,
- boolean message) throws IOException {
- checkVersion(fs, rootdir, message, 0,
- HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
+ public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
+ throws IOException, DeserializationException {
+ checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
/**
@@ -296,20 +375,20 @@ public abstract class FSUtils {
* @param retries number of times to retry
*
* @throws IOException e
+ * @throws DeserializationException
*/
public static void checkVersion(FileSystem fs, Path rootdir,
- boolean message, int wait, int retries) throws IOException {
+ boolean message, int wait, int retries)
+ throws IOException, DeserializationException {
String version = getVersion(fs, rootdir);
-
if (version == null) {
if (!rootRegionExists(fs, rootdir)) {
// rootDir is empty (no version file and no root region)
// just create new version file (HBASE-1195)
- FSUtils.setVersion(fs, rootdir, wait, retries);
+ setVersion(fs, rootdir, wait, retries);
return;
}
- } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
- return;
+ } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
// version is deprecated require migration
// Output on stdout so user sees it in terminal.
@@ -332,8 +411,8 @@ public abstract class FSUtils {
*/
public static void setVersion(FileSystem fs, Path rootdir)
throws IOException {
- setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
- HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
+ setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
+ HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
/**
@@ -367,19 +446,17 @@ public abstract class FSUtils {
while (true) {
try {
FSDataOutputStream s = fs.create(versionFile);
- s.writeUTF(version);
- LOG.debug("Created version file at " + rootdir.toString() +
- " set its version at:" + version);
+ s.write(toVersionByteArray(version));
s.close();
+ LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
return;
} catch (IOException e) {
if (retries > 0) {
- LOG.warn("Unable to create version file at " + rootdir.toString() +
- ", retrying: " + e.getMessage());
+ LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
fs.delete(versionFile, false);
try {
if (wait > 0) {
- Thread.sleep(wait);
+ Thread.sleep(wait);
}
} catch (InterruptedException ex) {
// ignore
@@ -431,19 +508,30 @@ public abstract class FSUtils {
* @return the unique cluster identifier
* @throws IOException if reading the cluster ID file fails
*/
- public static String getClusterId(FileSystem fs, Path rootdir)
- throws IOException {
+ public static ClusterId getClusterId(FileSystem fs, Path rootdir)
+ throws IOException {
Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
- String clusterId = null;
- if (fs.exists(idPath)) {
+ ClusterId clusterId = null;
+ FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
+ if (status != null) {
+ int len = Ints.checkedCast(status.getLen());
+ byte [] content = new byte[len];
FSDataInputStream in = fs.open(idPath);
try {
- clusterId = in.readUTF();
+ in.readFully(content);
} catch (EOFException eof) {
- LOG.warn("Cluster ID file "+idPath.toString()+" was empty");
+ LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
} finally{
in.close();
}
+ try {
+ clusterId = ClusterId.parseFrom(content);
+ } catch (DeserializationException e) {
+ throw new IOException("content=" + Bytes.toString(content), e);
+ }
+ // If not pb'd, make it so.
+ if (!ProtobufUtil.isPBMagicPrefix(content)) rewriteAsPb(fs, rootdir, idPath, clusterId);
+ return clusterId;
} else {
LOG.warn("Cluster ID file does not exist at " + idPath.toString());
}
@@ -451,6 +539,24 @@ public abstract class FSUtils {
}
/**
+ * @param cid
+ * @throws IOException
+ */
+ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
+ final ClusterId cid)
+ throws IOException {
+ // Rewrite the file as pb. Move aside the old one first, write new
+ // then delete the moved-aside file.
+ Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
+ if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
+ setClusterId(fs, rootdir, cid, 100);
+ if (!fs.delete(movedAsideName, false)) {
+ throw new IOException("Failed delete of " + movedAsideName);
+ }
+ LOG.debug("Rewrote the hbase.id file as pb");
+ }
+
+ /**
* Writes a new unique identifier for this cluster to the "hbase.id" file
* in the HBase root directory
* @param fs the root directory FileSystem
@@ -459,23 +565,25 @@ public abstract class FSUtils {
* @param wait how long (in milliseconds) to wait between retries
* @throws IOException if writing to the FileSystem fails and no wait value
*/
- public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
+ public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
int wait) throws IOException {
while (true) {
try {
Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
FSDataOutputStream s = fs.create(filePath);
- s.writeUTF(clusterId);
- s.close();
+ try {
+ s.write(clusterId.toByteArray());
+ } finally {
+ s.close();
+ }
if (LOG.isDebugEnabled()) {
- LOG.debug("Created cluster ID file at " + filePath.toString() +
- " with ID: " + clusterId);
+ LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
}
return;
} catch (IOException ioe) {
if (wait > 0) {
LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
- ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
+ ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
try {
Thread.sleep(wait);
} catch (InterruptedException ie) {
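Putting the ClusterId-typed FSUtils API together, a read-or-create sketch; a legacy writeUTF hbase.id file is parsed and rewritten as pb inside getClusterId itself:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.ClusterId;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class HBaseIdFileSketch {
      // Fetch the cluster id from ${rootdir}/hbase.id, creating the file in the
      // pb format when it does not exist yet.
      static ClusterId getOrCreate(FileSystem fs, Path rootdir, int waitMs)
      throws IOException {
        ClusterId cid = FSUtils.getClusterId(fs, rootdir);  // null when the file is absent
        if (cid == null) {
          cid = new ClusterId();                            // generates a fresh uuid-backed id
          FSUtils.setClusterId(fs, rootdir, cid, waitMs);
        }
        return cid;
      }
    }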
diff --git a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 7b4f4a2..bfff0dc 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -43,7 +43,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -597,14 +596,7 @@ public class HBaseFsck {
// already loaded data
return;
}
-
- Path regioninfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
- FileSystem fs = regioninfo.getFileSystem(conf);
-
- FSDataInputStream in = fs.open(regioninfo);
- HRegionInfo hri = new HRegionInfo();
- hri.readFields(in);
- in.close();
+ HRegionInfo hri = HRegion.loadDotRegionInfoFileContent(FileSystem.get(this.conf), regionDir);
LOG.debug("HRegionInfo read: " + hri.toString());
hbi.hdfsEntry.hri = hri;
}
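The helper HBaseFsck now calls is available to any tool that needs a region's on-disk metadata; a minimal sketch, assuming a region directory under the hbase root:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class RegionInfoFileSketch {
      // Load the .regioninfo content for one region directory.
      static HRegionInfo load(FileSystem fs, Path regionDir) throws IOException {
        return HRegion.loadDotRegionInfoFileContent(fs, regionDir);
      }
    }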
diff --git a/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 3d20723..87a8fd7 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;
@@ -143,6 +142,7 @@ public class Writables {
* @param bytes serialized bytes
* @return A HRegionInfo instance built out of passed bytes.
* @throws IOException e
+ * @deprecated Use {@link HRegionInfo#parseFrom(byte[])}
*/
public static HRegionInfo getHRegionInfo(final byte [] bytes)
throws IOException {
@@ -182,6 +182,7 @@ public class Writables {
* @return A HRegionInfo instance built out of passed bytes
* or null if passed bytes are null or an empty array.
* @throws IOException e
+ * @deprecated Use {@link HRegionInfo#parseFromOrNull(byte[])}
*/
public static HRegionInfo getHRegionInfoOrNull(final byte [] bytes)
throws IOException {
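With the Writables helpers deprecated, the pb-based calls are a straight swap; a sketch of the replacement round trip, assuming parseFromOrNull keeps getHRegionInfoOrNull's null-tolerant behavior:

    import org.apache.hadoop.hbase.DeserializationException;
    import org.apache.hadoop.hbase.HRegionInfo;

    public class HRegionInfoPbSketch {
      // Serialize and deserialize an HRegionInfo via pb instead of Writables.
      static HRegionInfo roundTrip(HRegionInfo hri) throws DeserializationException {
        byte [] bytes = hri.toByteArray();
        return HRegionInfo.parseFrom(bytes);
      }

      // Null-tolerant variant for bytes that may be null or empty (e.g. catalog cells).
      static HRegionInfo parseOrNull(byte [] bytes) throws DeserializationException {
        return HRegionInfo.parseFromOrNull(bytes);
      }
    }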
diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java
deleted file mode 100644
index f804810..0000000
--- a/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2011 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Publishes and synchronizes a unique identifier specific to a given HBase
- * cluster. The stored identifier is read from the file system by the active
- * master on startup, and is subsequently available to all watchers (including
- * clients).
- */
-@InterfaceAudience.Private
-public class ClusterId {
- private ZooKeeperWatcher watcher;
- private Abortable abortable;
- private String id;
-
- public ClusterId(ZooKeeperWatcher watcher, Abortable abortable) {
- this.watcher = watcher;
- this.abortable = abortable;
- }
-
- public boolean hasId() {
- return getId() != null;
- }
-
- public String getId() {
- try {
- if (id == null) {
- id = readClusterIdZNode(watcher);
- }
- } catch (KeeperException ke) {
- abortable.abort("Unexpected exception from ZooKeeper reading cluster ID",
- ke);
- }
- return id;
- }
-
- public static String readClusterIdZNode(ZooKeeperWatcher watcher)
- throws KeeperException {
- if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) {
- byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode);
- if (data != null) {
- return getZNodeClusterId(data);
- }
- }
- return null;
- }
-
- public static void setClusterId(ZooKeeperWatcher watcher, String id)
- throws KeeperException {
- ZKUtil.createSetData(watcher, watcher.clusterIdZNode, getZNodeData(id));
- }
-
- /**
- * @param clusterid
- * @return Content of the clusterid znode as a serialized pb with the pb
- * magic as prefix.
- */
- static byte [] getZNodeData(final String clusterid) {
- ZooKeeperProtos.ClusterId.Builder builder =
- ZooKeeperProtos.ClusterId.newBuilder();
- builder.setClusterId(clusterid);
- return ProtobufUtil.prependPBMagic(builder.build().toByteArray());
- }
-
- /**
- * @param data
- * @return The clusterid extracted from the passed znode data
- */
- static String getZNodeClusterId(final byte [] data) {
- if (data == null || data.length <= 0) return null;
- // If no magic, something is seriously wrong. Fail fast.
- if (!ProtobufUtil.isPBMagicPrefix(data)) throw new RuntimeException("No magic preamble");
- int prefixLen = ProtobufUtil.lengthOfPBMagic();
- try {
- ZooKeeperProtos.ClusterId clusterid =
- ZooKeeperProtos.ClusterId.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
- return clusterid.getClusterId();
- } catch (InvalidProtocolBufferException e) {
- // A failed parse of the znode is pretty catastrophic. Fail fast.
- throw new RuntimeException(e);
- }
- }
-}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java
new file mode 100644
index 0000000..afe5892
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Publishes and synchronizes a unique identifier specific to a given HBase
+ * cluster. The stored identifier is read from the file system by the active
+ * master on startup, and is subsequently available to all watchers (including
+ * clients).
+ */
+@InterfaceAudience.Private
+public class ZKClusterId {
+ private ZooKeeperWatcher watcher;
+ private Abortable abortable;
+ private String id;
+
+ public ZKClusterId(ZooKeeperWatcher watcher, Abortable abortable) {
+ this.watcher = watcher;
+ this.abortable = abortable;
+ }
+
+ public boolean hasId() {
+ return getId() != null;
+ }
+
+ public String getId() {
+ try {
+ if (id == null) {
+ id = readClusterIdZNode(watcher);
+ }
+ } catch (KeeperException ke) {
+ abortable.abort("Unexpected exception from ZooKeeper reading cluster ID",
+ ke);
+ }
+ return id;
+ }
+
+ public static String readClusterIdZNode(ZooKeeperWatcher watcher)
+ throws KeeperException {
+ if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) {
+ byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode);
+ if (data != null) {
+ try {
+ return ClusterId.parseFrom(data).toString();
+ } catch (DeserializationException e) {
+ throw ZKUtil.convert(e);
+ }
+ }
+ }
+ return null;
+ }
+
+ public static void setClusterId(ZooKeeperWatcher watcher, ClusterId id)
+ throws KeeperException {
+ ZKUtil.createSetData(watcher, watcher.clusterIdZNode, id.toByteArray());
+ }
+}
\ No newline at end of file
diff --git a/src/main/protobuf/ClusterId.proto b/src/main/protobuf/ClusterId.proto
new file mode 100644
index 0000000..80b7d0f
--- /dev/null
+++ b/src/main/protobuf/ClusterId.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are shared throughout HBase
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "ClusterIdProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Content of the '/hbase/hbaseid', cluster id, znode.
+ * Also the content of the ${HBASE_ROOTDIR}/hbase.id file.
+ */
+message ClusterId {
+ // This is the cluster id, a uuid as a String
+ required string clusterId = 1;
+}
diff --git a/src/main/protobuf/FS.proto b/src/main/protobuf/FS.proto
new file mode 100644
index 0000000..ca819e3
--- /dev/null
+++ b/src/main/protobuf/FS.proto
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are written into the filesystem
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "FSProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * The ${HBASE_ROOTDIR}/hbase.version file content
+ */
+message HBaseVersionFileContent {
+ required string version = 1;
+}
+
+/**
+ * Reference file content used when we split an hfile under a region.
+ */
+message Reference {
+ required bytes splitkey = 1;
+ enum Range {
+ TOP = 0;
+ BOTTOM = 1;
+ }
+ required Range range = 2;
+}
+
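For orientation, the generated FSProtos classes are used like any other protobuf-java message; a sketch of building a top-half Reference (accessor names follow standard protobuf-java codegen):

    import com.google.protobuf.ByteString;

    import org.apache.hadoop.hbase.protobuf.generated.FSProtos;

    public class ReferencePbSketch {
      // Build the pb form of a reference to the top half of a parent hfile.
      static FSProtos.Reference topReference(byte [] splitRow) {
        return FSProtos.Reference.newBuilder()
          .setSplitkey(ByteString.copyFrom(splitRow))
          .setRange(FSProtos.Reference.Range.TOP)
          .build();
      }
    }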
diff --git a/src/main/protobuf/ZooKeeper.proto b/src/main/protobuf/ZooKeeper.proto
index b72cb28..9773f4d 100644
--- a/src/main/protobuf/ZooKeeper.proto
+++ b/src/main/protobuf/ZooKeeper.proto
@@ -43,15 +43,6 @@ message Master {
required ServerName master = 1;
}
-// TODO: Put these two cluster attributes into the one znode.
-/**
- * Content of the '/hbase/hbaseid', cluster id, znode.
- */
-message ClusterId {
- // This is the cluster id, a uuid as a String
- required string clusterId = 1;
-}
-
/**
* Content of the '/hbase/shutdown', cluster state, znode.
*/
diff --git a/src/main/protobuf/hbase.proto b/src/main/protobuf/hbase.proto
index 30a4c3f..b57853d 100644
--- a/src/main/protobuf/hbase.proto
+++ b/src/main/protobuf/hbase.proto
@@ -24,6 +24,33 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
/**
+ * Table Schema
+ * Inspired by the REST TableSchema message
+ */
+message TableSchema {
+ optional bytes name = 1;
+ message Attribute {
+ required bytes name = 1;
+ required bytes value = 2;
+ }
+ repeated Attribute attributes = 2;
+ repeated ColumnFamilySchema columnFamilies = 3;
+}
+
+/**
+ * Column Family Schema
+ * Inspired by the REST ColumnSchemaMessage
+ */
+message ColumnFamilySchema {
+ required bytes name = 1;
+ message Attribute {
+ required bytes name = 1;
+ required bytes value = 2;
+ }
+ repeated Attribute attributes = 2;
+}
+
+/**
* Protocol buffer version of HRegionInfo.
*/
message RegionInfo {
@@ -196,4 +223,3 @@ message NameBytesPair {
required string name = 1;
optional bytes value = 2;
}
-
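A sketch of building the new schema messages by hand; the outer class name HBaseProtos and the BLOCKSIZE attribute are assumptions for illustration (hbase.proto's java_outer_classname option sits outside this hunk):

    import com.google.protobuf.ByteString;

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class SchemaPbSketch {
      // Compose a TableSchema with one column family carrying a single attribute.
      static HBaseProtos.TableSchema tableSchema(byte [] tableName, byte [] familyName) {
        HBaseProtos.ColumnFamilySchema family = HBaseProtos.ColumnFamilySchema.newBuilder()
          .setName(ByteString.copyFrom(familyName))
          .addAttributes(HBaseProtos.ColumnFamilySchema.Attribute.newBuilder()
            .setName(ByteString.copyFromUtf8("BLOCKSIZE"))
            .setValue(ByteString.copyFromUtf8("65536")))
          .build();
        return HBaseProtos.TableSchema.newBuilder()
          .setName(ByteString.copyFrom(tableName))
          .addColumnFamilies(family)
          .build();
      }
    }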
diff --git a/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
index e7fa8b2..0482e65 100644
--- a/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
+++ b/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import org.junit.experimental.categories.Category;
import org.junit.Test;
@@ -25,9 +27,23 @@ import org.junit.Test;
/** Tests the HColumnDescriptor with appropriate arguments */
@Category(SmallTests.class)
public class TestHColumnDescriptor {
+ @Test
+ public void testPb() throws DeserializationException {
+ HColumnDescriptor hcd = HTableDescriptor.META_TABLEDESC.getColumnFamilies()[0];
+ final int v = 123;
+ hcd.setBlocksize(v);
+ hcd.setTimeToLive(v);
+ hcd.setBlockCacheEnabled(true);
+ hcd.setValue("a", "b");
+ byte [] bytes = hcd.toByteArray();
+ HColumnDescriptor deserializedHcd = HColumnDescriptor.parseFrom(bytes);
+ assertTrue(hcd.equals(deserializedHcd));
+ assertEquals(v, hcd.getBlocksize());
+ assertEquals(v, hcd.getTimeToLive());
+ assertEquals(hcd.getValue("a"), deserializedHcd.getValue("a"));
+ }
@Test
- @SuppressWarnings("deprecation")
/** Tests HColumnDescriptor with empty familyName*/
public void testHColumnDescriptorShouldThrowIAEWhenFamiliyNameEmpty()
throws Exception {
@@ -37,4 +53,4 @@ public class TestHColumnDescriptor {
assertEquals("Family name can not be empty", e.getLocalizedMessage());
}
}
-}
+}
\ No newline at end of file
diff --git a/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
index f7c0cca..c15ba38 100644
--- a/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
+++ b/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import java.io.IOException;
+
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -30,6 +32,20 @@ import org.junit.experimental.categories.Category;
*/
@Category(SmallTests.class)
public class TestHTableDescriptor {
+ @Test
+ public void testPb() throws DeserializationException, IOException {
+ HTableDescriptor htd = HTableDescriptor.META_TABLEDESC;
+ final int v = 123;
+ htd.setMaxFileSize(v);
+ htd.setDeferredLogFlush(true);
+ htd.setReadOnly(true);
+ byte [] bytes = htd.toByteArray();
+ HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes);
+ assertEquals(htd, deserializedHtd);
+ assertEquals(v, deserializedHtd.getMaxFileSize());
+ assertTrue(deserializedHtd.isReadOnly());
+ assertTrue(deserializedHtd.isDeferredLogFlush());
+ }
/**
* Test cps in the table description
@@ -62,5 +78,4 @@ public class TestHTableDescriptor {
desc.remove(key);
assertEquals(null, desc.getValue(key));
}
-
-}
+}
\ No newline at end of file
diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
index 83d8408..bf3c8f2 100644
--- a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
+++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.ServerCallable;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
@@ -205,7 +204,7 @@ public class TestCatalogTracker {
// If a 'getRegionInfo' is called on mocked AdminProtocol, throw IOE
// the first time. 'Succeed' the second time we are called.
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
- builder.setRegionInfo(ProtobufUtil.toRegionInfo(new HRegionInfo(Bytes.toBytes("test"))));
+ builder.setRegionInfo(HRegionInfo.convert(new HRegionInfo(Bytes.toBytes("test"))));
Mockito.when(admin.getRegionInfo((RpcController)Mockito.any(),
(GetRegionInfoRequest)Mockito.any())).thenThrow(
new ServiceException(new IOException("Server not running, aborting"))).
@@ -412,7 +411,7 @@ public class TestCatalogTracker {
Mockito.when(connection.getRegionServerWithRetries((ServerCallable)Mockito.any())).
thenReturn(result);
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
- builder.setRegionInfo(ProtobufUtil.toRegionInfo(HRegionInfo.FIRST_META_REGIONINFO));
+ builder.setRegionInfo(HRegionInfo.convert(HRegionInfo.FIRST_META_REGIONINFO));
Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
(GetRegionInfoRequest)Mockito.any())).thenReturn(builder.build());
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
diff --git a/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 69ccc65..04ed1a3 100644
--- a/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -422,7 +422,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer
public GetRegionInfoResponse getRegionInfo(RpcController controller,
GetRegionInfoRequest request) throws ServiceException {
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
- builder.setRegionInfo(ProtobufUtil.toRegionInfo(HRegionInfo.ROOT_REGIONINFO));
+ builder.setRegionInfo(HRegionInfo.convert(HRegionInfo.ROOT_REGIONINFO));
return builder.build();
}
diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 1020374..5303608 100644
--- a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -344,7 +344,7 @@ public class TestCatalogJanitor {
HTableDescriptor.getTableDir(rootdir, htd.getName());
Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
htd.getColumnFamilies()[0].getName());
- Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top);
+ Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
long now = System.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
@@ -518,8 +518,8 @@ public class TestCatalogJanitor {
Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
htd.getColumnFamilies()[0].getName());
- Reference ref = new Reference(midkey,
- top? Reference.Range.top: Reference.Range.bottom);
+ Reference ref =
+ top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
long now = System.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 6dfba41..eff72e8 100644
--- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -27,10 +27,17 @@ import static org.junit.Assert.fail;
import java.io.IOException;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -38,6 +45,43 @@ import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
public class TestHRegionInfo {
@Test
+ public void testPb() throws DeserializationException {
+ HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
+ byte [] bytes = hri.toByteArray();
+ HRegionInfo pbhri = HRegionInfo.parseFrom(bytes);
+ assertTrue(hri.equals(pbhri));
+ }
+
+ @Test
+ public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
+ Path basedir = htu.getDataTestDir();
+ // Create a region. That'll write the .regioninfo file.
+ HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
+ HTableDescriptor.META_TABLEDESC);
+ // Get modtime on the file.
+ long modtime = getModTime(r);
+ HRegion.closeHRegion(r);
+ Thread.sleep(1001);
+ r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(), HTableDescriptor.META_TABLEDESC);
+ // Ensure the file is not written for a second time.
+ long modtime2 = getModTime(r);
+ assertEquals(modtime, modtime2);
+ // Now load the file.
+ HRegionInfo deserializedHri =
+ HRegion.loadDotRegionInfoFileContent(FileSystem.get(htu.getConfiguration()), r.getRegionDir());
+ assertTrue(hri.equals(deserializedHri));
+ }
+
+ long getModTime(final HRegion r) throws IOException {
+ FileStatus [] statuses =
+ r.getFilesystem().listStatus(new Path(r.getRegionDir(), HRegion.REGIONINFO_FILE));
+ assertTrue(statuses != null && statuses.length == 1);
+ return statuses[0].getModificationTime();
+ }
+
+ @Test
public void testCreateHRegionInfoName() throws Exception {
String tableName = "tablename";
final byte [] tn = Bytes.toBytes(tableName);
@@ -60,11 +104,11 @@ public class TestHRegionInfo {
+ id + "." + md5HashInHex + ".",
nameStr);
}
-
+
@Test
public void testGetSetOfHTD() throws IOException {
HBaseTestingUtility HTU = new HBaseTestingUtility();
- final String tablename = "testGetSetOfHTD";
+ final String tablename = "testGetSetOfHTD";
// Delete the temporary table directory that might still be there from the
// previous test run.
diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 988d0bf..2669cdb 100644
--- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -156,7 +155,7 @@ public class TestStoreFile extends HBaseTestCase {
kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
byte [] finalRow = kv.getRow();
// Make a reference
- Path refPath = StoreFile.split(fs, dir, hsf, midRow, Range.top);
+ Path refPath = StoreFile.split(fs, dir, hsf, midRow, true);
StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the reference and that it only gets
@@ -184,15 +183,14 @@ public class TestStoreFile extends HBaseTestCase {
if (this.fs.exists(topDir)) {
this.fs.delete(topDir, true);
}
- Path topPath = StoreFile.split(this.fs, topDir, f, midRow, Range.top);
+ Path topPath = StoreFile.split(this.fs, topDir, f, midRow, true);
// Create bottom split.
Path bottomDir = Store.getStoreHomedir(this.testDir, "2",
Bytes.toBytes(f.getPath().getParent().getName()));
if (this.fs.exists(bottomDir)) {
this.fs.delete(bottomDir, true);
}
- Path bottomPath = StoreFile.split(this.fs, bottomDir,
- f, midRow, Range.bottom);
+ Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midRow, false);
// Make readers on top and bottom.
StoreFile.Reader top =
new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
@@ -251,9 +249,8 @@ public class TestStoreFile extends HBaseTestCase {
// First, do a key that is < than first key. Ensure splits behave
// properly.
byte [] badmidkey = Bytes.toBytes(" .");
- topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
- bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
- Range.bottom);
+ topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
+ bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf,
StoreFile.BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
@@ -298,9 +295,8 @@ public class TestStoreFile extends HBaseTestCase {
// Test when badkey is > than last key in file ('||' > 'zz').
badmidkey = Bytes.toBytes("|||");
- topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
- bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
- Range.bottom);
+ topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
+ bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf,
StoreFile.BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
@@ -442,8 +438,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
- DataBlockEncoding.NONE);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
reader.loadFileInfo();
reader.loadBloomfilter();
diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 339a120..b732f5a 100644
--- a/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -21,9 +21,12 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
+import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
@@ -32,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -46,6 +50,30 @@ import org.junit.experimental.categories.Category;
*/
@Category(MediumTests.class)
public class TestFSUtils {
+ @Test
+ public void testVersion() throws DeserializationException, IOException {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ final FileSystem fs = htu.getTestFileSystem();
+ final Path rootdir = htu.getDataTestDir();
+ assertNull(FSUtils.getVersion(fs, rootdir));
+ // Write out old format version file. See if we can read it in and convert.
+ Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
+ FSDataOutputStream s = fs.create(versionFile);
+ final String version = HConstants.FILE_SYSTEM_VERSION;
+ s.writeUTF(version);
+ s.close();
+ assertTrue(fs.exists(versionFile));
+ FileStatus [] status = fs.listStatus(versionFile);
+ assertNotNull(status);
+ assertTrue(status.length > 0);
+ String newVersion = FSUtils.getVersion(fs, rootdir);
+ assertEquals(version.length(), newVersion.length());
+ assertEquals(version, newVersion);
+ // File will have been converted. Exercise the pb format
+ assertEquals(version, FSUtils.getVersion(fs, rootdir));
+ FSUtils.checkVersion(fs, rootdir, true);
+ }
+
@Test public void testIsHDFS() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
htu.getConfiguration().setBoolean("dfs.support.append", false);