diff --git a/bin/hbase b/bin/hbase index d55688b..4eeb145 100755 --- a/bin/hbase +++ b/bin/hbase @@ -137,6 +137,16 @@ add_to_cp_if_exists() { fi } +# For releases, add hbase & webapps to CLASSPATH +# Webapps must come first else it messes up Jetty +if [ -d "$HBASE_HOME/hbase-webapps" ]; then + add_to_cp_if_exists "${HBASE_HOME}" +fi +#add if we are in a dev environment +if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then + add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target" +fi + add_maven_deps_to_classpath() { # Need to generate classpath from maven pom. This is costly so generate it # and cache it. Save the file into our target dir so a mvn clean will get @@ -168,23 +178,13 @@ add_maven_test_classes_to_classpath(){ done } -# Add maven target directory +#Add the development env class path stuff if $in_dev_env; then add_maven_deps_to_classpath add_maven_main_classes_to_classpath add_maven_test_classes_to_classpath fi -# For releases, add hbase & webapps to CLASSPATH -# Webapps must come first else it messes up Jetty -if [ -d "$HBASE_HOME/hbase-webapps" ]; then - add_to_cp_if_exists "${HBASE_HOME}" -fi -#add if we are in a dev environment -if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then - add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target" -fi - #add the hbase jars for each module for f in $HBASE_HOME/hbase-jars/hbase*.jar; do if [[ $f = *sources.jar ]] diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 8a7e344..b3edd91 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -185,6 +185,10 @@ + org.apache.hbase + hbase-common + + hbase-server org.apache.hbase compile diff --git a/hbase-assembly/src/assembly/all.xml b/hbase-assembly/src/assembly/all.xml index 89bd028..1883fb2 100644 --- a/hbase-assembly/src/assembly/all.xml +++ b/hbase-assembly/src/assembly/all.xml @@ -161,6 +161,7 @@ hbase-server-${project.version}.jar + hbase-common-${project.version}.jar target/ test/ .classpath diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml new file mode 100644 index 0000000..5b7fa7b --- /dev/null +++ b/hbase-common/pom.xml @@ -0,0 +1,173 @@ + + + 4.0.0 + + hbase + org.apache.hbase + 0.95-SNAPSHOT + ..
+ + + hbase-common + HBase - Common + Common functionality for HBase + + + + + com.google.guava + guava + + + commons-logging + commons-logging + + + + + + + hadoop-1.0 + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-core + + + org.apache.hadoop + hadoop-test + + + + + + + hadoop-2.0 + + + hadoop.profile + 2.0 + + + + + org.apache.hadoop + hadoop-client + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-minicluster + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-minicluster + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java new file mode 100644 index 0000000..97af168 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -0,0 +1,152 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.VersionInfo; + +/** + * Adds HBase configuration files to a Configuration + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HBaseConfiguration extends Configuration { + + private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class); + + // a constant to convert a fraction to a percentage + private static final int CONVERT_TO_PERCENTAGE = 100; + + /** + * Instantinating HBaseConfiguration() is deprecated. Please use + * HBaseConfiguration#create() to construct a plain Configuration + */ + @Deprecated + public HBaseConfiguration() { + //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration + super(); + addHbaseResources(this); + LOG.warn("instantiating HBaseConfiguration() is deprecated. 
Please use" + + " HBaseConfiguration#create() to construct a plain Configuration"); + } + + /** + * Instantiating HBaseConfiguration() is deprecated. Please use + * HBaseConfiguration#create(conf) to construct a plain Configuration + */ + @Deprecated + public HBaseConfiguration(final Configuration c) { + //TODO:replace with private constructor + this(); + merge(this, c); + } + + private static void checkDefaultsVersion(Configuration conf) { + if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return; + String defaultsVersion = conf.get("hbase.defaults.for.version"); + String thisVersion = VersionInfo.getVersion(); + if (!thisVersion.equals(defaultsVersion)) { + throw new RuntimeException( + "hbase-default.xml file seems to be for an old version of HBase (" + + defaultsVersion + "), this version is " + thisVersion); + } + } + + private static void checkForClusterFreeMemoryLimit(Configuration conf) { + float globalMemstoreLimit = conf.getFloat("hbase.regionserver.global.memstore.upperLimit", 0.4f); + int gml = (int)(globalMemstoreLimit * CONVERT_TO_PERCENTAGE); + float blockCacheUpperLimit = + conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, + HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); + if (CONVERT_TO_PERCENTAGE - (gml + bcul) + < (int)(CONVERT_TO_PERCENTAGE * + HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { + throw new RuntimeException( + "Current heap configuration for MemStore and BlockCache exceeds " + + "the threshold required for successful cluster operation. " + + "The combined value cannot exceed 0.8. Please check " + + "the settings for hbase.regionserver.global.memstore.upperLimit and " + + "hfile.block.cache.size in your configuration. " + + "hbase.regionserver.global.memstore.upperLimit is " + + globalMemstoreLimit + + " hfile.block.cache.size is " + blockCacheUpperLimit); + } + } + + public static Configuration addHbaseResources(Configuration conf) { + conf.addResource("hbase-default.xml"); + conf.addResource("hbase-site.xml"); + + checkDefaultsVersion(conf); + checkForClusterFreeMemoryLimit(conf); + return conf; + } + + /** + * Creates a Configuration with HBase resources + * @return a Configuration with HBase resources + */ + public static Configuration create() { + Configuration conf = new Configuration(); + return addHbaseResources(conf); + } + + /** + * Creates a clone of passed configuration. + * @param that Configuration to clone. + * @return a clone of passed configuration. + */ + public static Configuration create(final Configuration that) { + return new Configuration(that); + } + + /** + * Merge two configurations.
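+ * <p>A minimal usage sketch (illustrative only; the key and host below are
+ * made-up examples, not part of this patch):
+ * <pre>
+ *   Configuration conf = HBaseConfiguration.create();
+ *   Configuration overrides = new Configuration(false);
+ *   overrides.set("hbase.zookeeper.quorum", "zk1.example.com");
+ *   HBaseConfiguration.merge(conf, overrides); // conf now carries the override
+ * </pre>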
+ * @param destConf the configuration that will be overwritten with items + * from the srcConf + * @param srcConf the source configuration + **/ + public static void merge(Configuration destConf, Configuration srcConf) { + for (Entry e : srcConf) { + destConf.set(e.getKey(), e.getValue()); + } + } + + /** + * @return whether to show HBase Configuration in servlet + */ + public static boolean isShowConfInServlet() { + boolean isShowConf = false; + try { + if (Class.forName("org.apache.hadoop.conf.ConfServlet") != null) { + isShowConf = true; + } + } catch (Exception e) { + } + return isShowConf; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java new file mode 100644 index 0000000..0075aa5 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -0,0 +1,676 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * HConstants holds a bunch of HBase-related constants + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public final class HConstants { + /** + * Status codes used for return values of bulk operations. + */ + public enum OperationStatusCode { + NOT_RUN, + SUCCESS, + SANITY_CHECK_FAILURE, + FAILURE; + } + + /** long constant for zero */ + public static final Long ZERO_L = Long.valueOf(0L); + public static final String NINES = "99999999999999"; + public static final String ZEROES = "00000000000000"; + + // For migration + + /** name of version file */ + public static final String VERSION_FILE_NAME = "hbase.version"; + + /** + * Current version of file system. + * Version 4 supports only one kind of bloom filter. + * Version 5 changes versions in catalog table regions. + * Version 6 enables blockcaching on catalog tables. + * Version 7 introduces hfile -- hbase 0.19 to 0.20.. + */ + // public static final String FILE_SYSTEM_VERSION = "6"; + public static final String FILE_SYSTEM_VERSION = "7"; + + // Configuration parameters + + //TODO: Is having HBase homed on port 60k OK? 
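+  // Illustrative sketch, not part of the original patch: the keys below are
+  // meant to be read through a Configuration, e.g.
+  //   Configuration conf = HBaseConfiguration.create();
+  //   boolean distributed = conf.getBoolean(CLUSTER_DISTRIBUTED, DEFAULT_CLUSTER_DISTRIBUTED);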
+ + /** Cluster is in distributed mode or not */ + public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed"; + + /** Config for pluggable load balancers */ + public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class"; + + /** Cluster is standalone or pseudo-distributed */ + public static final boolean CLUSTER_IS_LOCAL = false; + + /** Cluster is fully-distributed */ + public static final boolean CLUSTER_IS_DISTRIBUTED = true; + + /** Default value for cluster distributed mode */ + public static final boolean DEFAULT_CLUSTER_DISTRIBUTED = CLUSTER_IS_LOCAL; + + /** default host address */ + public static final String DEFAULT_HOST = "0.0.0.0"; + + /** Parameter name for port master listens on. */ + public static final String MASTER_PORT = "hbase.master.port"; + + /** default port that the master listens on */ + public static final int DEFAULT_MASTER_PORT = 60000; + + /** default port for master web api */ + public static final int DEFAULT_MASTER_INFOPORT = 60010; + + /** Configuration key for master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + + /** Parameter name for the master type being backup (waits for primary to go inactive). */ + public static final String MASTER_TYPE_BACKUP = "hbase.master.backup"; + + /** by default every master is a possible primary master unless the conf explicitly overrides it */ + public static final boolean DEFAULT_MASTER_TYPE_BACKUP = false; + + /** Parameter name for ZooKeeper session time out.*/ + public static final String ZOOKEEPER_SESSION_TIMEOUT = + "zookeeper.session.timeout"; + + /** Name of ZooKeeper quorum configuration parameter. */ + public static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum"; + + /** Name of ZooKeeper config file in conf/ directory. */ + public static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg"; + + /** Common prefix of ZooKeeper configuration properties */ + public static final String ZK_CFG_PROPERTY_PREFIX = + "hbase.zookeeper.property."; + + public static final int ZK_CFG_PROPERTY_PREFIX_LEN = + ZK_CFG_PROPERTY_PREFIX.length(); + + /** + * The ZK client port key in the ZK properties map. The name reflects the + * fact that this is not an HBase configuration key. 
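+   * For example, prefixing it with {@link #ZK_CFG_PROPERTY_PREFIX} yields the
+   * HBase-side key <code>hbase.zookeeper.property.clientPort</code>.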
+ */ + public static final String CLIENT_PORT_STR = "clientPort"; + + /** Parameter name for the client port that the zookeeper listens on */ + public static final String ZOOKEEPER_CLIENT_PORT = + ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR; + + /** Default client port that the zookeeper listens on */ + public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181; + + /** Parameter name for the wait time for the recoverable zookeeper */ + public static final String ZOOKEEPER_RECOVERABLE_WAITTIME = "hbase.zookeeper.recoverable.waittime"; + + /** Default wait time for the recoverable zookeeper */ + public static final long DEFAULT_ZOOKEPER_RECOVERABLE_WAITIME = 10000; + + /** Parameter name for the root dir in ZK for this cluster */ + public static final String ZOOKEEPER_ZNODE_PARENT = "zookeeper.znode.parent"; + + public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase"; + + /** + * Parameter name for the limit on concurrent client-side zookeeper + * connections + */ + public static final String ZOOKEEPER_MAX_CLIENT_CNXNS = + ZK_CFG_PROPERTY_PREFIX + "maxClientCnxns"; + + /** Parameter name for the ZK data directory */ + public static final String ZOOKEEPER_DATA_DIR = + ZK_CFG_PROPERTY_PREFIX + "dataDir"; + + /** Default limit on concurrent client-side zookeeper connections */ + public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 300; + + /** Configuration key for ZooKeeper session timeout */ + public static final String ZK_SESSION_TIMEOUT = "zookeeper.session.timeout"; + + /** Default value for ZooKeeper session timeout */ + public static final int DEFAULT_ZK_SESSION_TIMEOUT = 180 * 1000; + + /** Parameter name for port region server listens on. */ + public static final String REGIONSERVER_PORT = "hbase.regionserver.port"; + + /** Default port region server listens on. */ + public static final int DEFAULT_REGIONSERVER_PORT = 60020; + + /** default port for region server web api */ + public static final int DEFAULT_REGIONSERVER_INFOPORT = 60030; + + /** A configuration key for regionserver info port */ + public static final String REGIONSERVER_INFO_PORT = + "hbase.regionserver.info.port"; + + /** A flag that enables automatic selection of regionserver info port */ + public static final String REGIONSERVER_INFO_PORT_AUTO = + REGIONSERVER_INFO_PORT + ".auto"; + + /** Parameter name for what region server implementation to use. */ + public static final String REGION_SERVER_IMPL= "hbase.regionserver.impl"; + + /** Parameter name for what master implementation to use. 
*/ + public static final String MASTER_IMPL= "hbase.master.impl"; + + /** Parameter name for how often threads should wake up */ + public static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency"; + + /** Default value for thread wake frequency */ + public static final int DEFAULT_THREAD_WAKE_FREQUENCY = 10 * 1000; + + /** Parameter name for how often we should try to write a version file, before failing */ + public static final String VERSION_FILE_WRITE_ATTEMPTS = "hbase.server.versionfile.writeattempts"; + + /** Default number of times we should try to write a version file, before failing */ + public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3; + + /** Parameter name for how often a region should perform a major compaction */ + public static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction"; + + /** Parameter name for HBase instance root directory */ + public static final String HBASE_DIR = "hbase.rootdir"; + + /** Parameter name for HBase client IPC pool type */ + public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type"; + + /** Parameter name for HBase client IPC pool size */ + public static final String HBASE_CLIENT_IPC_POOL_SIZE = "hbase.client.ipc.pool.size"; + + /** Parameter name for HBase client operation timeout, which overrides RPC timeout */ + public static final String HBASE_CLIENT_OPERATION_TIMEOUT = "hbase.client.operation.timeout"; + + /** Default HBase client operation timeout, which is tantamount to a blocking call */ + public static final int DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = Integer.MAX_VALUE; + + /** Used to construct the name of the log directory for a region server + * Use '.' as a special character to separate the log files from table data */ + public static final String HREGION_LOGDIR_NAME = ".logs"; + + /** Used to construct the name of the splitlog directory for a region server */ + public static final String SPLIT_LOGDIR_NAME = "splitlog"; + + public static final String CORRUPT_DIR_NAME = ".corrupt"; + + /** Like the previous, but for old logs that are about to be deleted */ + public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs"; + + /** Used to construct the name of the compaction directory during compaction */ + public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir"; + + /** Conf key for the max file size after which we split the region */ + public static final String HREGION_MAX_FILESIZE = + "hbase.hregion.max.filesize"; + + /** Default maximum file size */ + public static final long DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L; + + /** + * The max number of threads used for opening and closing stores or store + * files in parallel + */ + public static final String HSTORE_OPEN_AND_CLOSE_THREADS_MAX = + "hbase.hstore.open.and.close.threads.max"; + + /** + * The default number for the max number of threads used for opening and + * closing stores or store files in parallel + */ + public static final int DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX = 1; + + + /** Conf key for the memstore size at which we flush the memstore */ + public static final String HREGION_MEMSTORE_FLUSH_SIZE = + "hbase.hregion.memstore.flush.size"; + + /** Default size of a reservation block */ + public static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5; + + /** Maximum value length, enforced on KeyValue construction */ + public static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE; + + /** name of the file for unique cluster ID */ +
public static final String CLUSTER_ID_FILE_NAME = "hbase.id"; + + /** Configuration key storing the cluster ID */ + public static final String CLUSTER_ID = "hbase.cluster.id"; + + // Always store the location of the root table's HRegion. + // This HRegion is never split. + + // region name = table + startkey + regionid. This is the row key. + // each row in the root and meta tables describes exactly 1 region + // Do we ever need to know all the information that we are storing? + + // Note that the name of the root table starts with "-" and the name of the + // meta table starts with "." Why? it's a trick. It turns out that when we + // store region names in memory, we use a SortedMap. Since "-" sorts before + // "." (and since no other table name can start with either of these + // characters, the root region will always be the first entry in such a Map, + // followed by all the meta regions (which will be ordered by their starting + // row key as well), followed by all user tables. So when the Master is + // choosing regions to assign, it will always choose the root region first, + // followed by the meta regions, followed by user regions. Since the root + // and meta regions always need to be on-line, this ensures that they will + // be the first to be reassigned if the server(s) they are being served by + // should go down. + + /** The root table's name.*/ + public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-"); + + /** The META table's name. */ + public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); + + /** delimiter used between portions of a region name */ + public static final int META_ROW_DELIMITER = ','; + + /** The catalog family as a string*/ + public static final String CATALOG_FAMILY_STR = "info"; + + /** The catalog family */ + public static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR); + + /** The RegionInfo qualifier as a string */ + public static final String REGIONINFO_QUALIFIER_STR = "regioninfo"; + + /** The regioninfo column qualifier */ + public static final byte [] REGIONINFO_QUALIFIER = + Bytes.toBytes(REGIONINFO_QUALIFIER_STR); + + /** The server column qualifier */ + public static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server"); + + /** The startcode column qualifier */ + public static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode"); + + /** The lower-half split region column qualifier */ + public static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA"); + + /** The upper-half split region column qualifier */ + public static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB"); + + /** + * The meta table version column qualifier. + * We keep current version of the meta table in this column in -ROOT- + * table: i.e. in the 'info:v' column. + */ + public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v"); + + /** + * The current version of the meta table. + * Before this the meta had HTableDescriptor serialized into the HRegionInfo; + * i.e. pre-hbase 0.92. There was no META_VERSION column in the root table + * in this case. The presence of a version and its value being zero indicates + * meta is up-to-date. + */ + public static final short META_VERSION = 0; + + // Other constants + + /** + * An empty instance. 
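+   * It backs {@link #EMPTY_START_ROW} and {@link #EMPTY_END_ROW}, which
+   * scanners use to denote an unbounded row range.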
+ */ + public static final byte [] EMPTY_BYTE_ARRAY = new byte [0]; + + /** + * Used by scanners, etc when they want to start at the beginning of a region + */ + public static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY; + + /** + * Last row in a table. + */ + public static final byte [] EMPTY_END_ROW = EMPTY_START_ROW; + + /** + * Used by scanners and others when they're trying to detect the end of a + * table + */ + public static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY; + + /** + * Max length a row can have because of the limitation in TFile. + */ + public static final int MAX_ROW_LENGTH = Short.MAX_VALUE; + + /** When we encode strings, we always specify UTF8 encoding */ + public static final String UTF8_ENCODING = "UTF-8"; + + /** + * Timestamp to use when we want to refer to the latest cell. + * This is the timestamp sent by clients when no timestamp is specified on + * commit. + */ + public static final long LATEST_TIMESTAMP = Long.MAX_VALUE; + + /** + * Timestamp to use when we want to refer to the oldest cell. + */ + public static final long OLDEST_TIMESTAMP = Long.MIN_VALUE; + + /** + * LATEST_TIMESTAMP in bytes form + */ + public static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP); + + /** + * Define for 'return-all-versions'. + */ + public static final int ALL_VERSIONS = Integer.MAX_VALUE; + + /** + * Unlimited time-to-live. + */ +// public static final int FOREVER = -1; + public static final int FOREVER = Integer.MAX_VALUE; + + /** + * Seconds in a week + */ + public static final int WEEK_IN_SECONDS = 7 * 24 * 3600; + + //TODO: although the following are referenced widely to format strings for + // the shell. They really aren't a part of the public API. It would be + // nice if we could put them somewhere where they did not need to be + // public. They could have package visibility + public static final String NAME = "NAME"; + public static final String VERSIONS = "VERSIONS"; + public static final String IN_MEMORY = "IN_MEMORY"; + public static final String CONFIG = "CONFIG"; + + /** + * This is a retry backoff multiplier table similar to the BSD TCP syn + * backoff table, a bit more aggressive than simple exponential backoff. + */ + public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 }; + + public static final String REGION_IMPL = "hbase.hregion.impl"; + + /** modifyTable op for replacing the table descriptor */ + public static enum Modify { + CLOSE_REGION, + TABLE_COMPACT, + TABLE_FLUSH, + TABLE_MAJOR_COMPACT, + TABLE_SET_HTD, + TABLE_SPLIT + } + + /** + * Scope tag for locally scoped data. + * This data will not be replicated. + */ + public static final int REPLICATION_SCOPE_LOCAL = 0; + + /** + * Scope tag for globally scoped data. + * This data will be replicated to all peers. + */ + public static final int REPLICATION_SCOPE_GLOBAL = 1; + + /** + * Default cluster ID, cannot be used to identify a cluster so a key with + * this value means it wasn't meant for replication. + */ + public static final UUID DEFAULT_CLUSTER_ID = new UUID(0L,0L); + + /** + * Parameter name for maximum number of bytes returned when calling a + * scanner's next method. + */ + public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size"; + + /** + * Maximum number of bytes returned when calling a scanner's next method. + * Note that when a single row is larger than this limit the row is still + * returned completely. + * + * The default value is unlimited. 
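+   * <p>A client could cap it explicitly (illustrative sketch):
+   * <pre>
+   *   conf.setLong(HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, 2 * 1024 * 1024);
+   * </pre>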
+ */ + public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE; + + /** + * Parameter name for client pause value, used mostly as value to wait + * before running a retry of a failed get, region lookup, etc. + */ + public static String HBASE_CLIENT_PAUSE = "hbase.client.pause"; + + /** + * Default value of {@link #HBASE_CLIENT_PAUSE}. + */ + public static long DEFAULT_HBASE_CLIENT_PAUSE = 1000; + + /** + * Parameter name for maximum retries, used as maximum for all retryable + * operations such as fetching of the root region from root region server, + * getting a cell's value, starting a row update, etc. + */ + public static String HBASE_CLIENT_RETRIES_NUMBER = "hbase.client.retries.number"; + + /** + * Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}. + */ + public static int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10; + + /** + * Parameter name for maximum attempts, used to limit the number of times the + * client will try to obtain the proxy for a given region server. + */ + public static String HBASE_CLIENT_RPC_MAXATTEMPTS = "hbase.client.rpc.maxattempts"; + + /** + * Default value of {@link #HBASE_CLIENT_RPC_MAXATTEMPTS}. + */ + public static int DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS = 1; + + /** + * Parameter name for client prefetch limit, used as the maximum number of regions + * info that will be prefetched. + */ + public static String HBASE_CLIENT_PREFETCH_LIMIT = "hbase.client.prefetch.limit"; + + /** + * Default value of {@link #HBASE_CLIENT_PREFETCH_LIMIT}. + */ + public static int DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT = 10; + + /** + * Parameter name for number of rows that will be fetched when calling next on + * a scanner if it is not served from memory. Higher caching values will + * enable faster scanners but will eat up more memory and some calls of next + * may take longer and longer times when the cache is empty. + */ + public static String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching"; + + /** + * Default value of {@link #HBASE_META_SCANNER_CACHING}. + */ + public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100; + + /** + * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration} + * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that, + * for all intents and purposes, are the same except for their instance ids, + * then they will not be able to share the same {@link org.apache.hadoop.hbase.client.HConnection} instance. + * On the other hand, even if the instance ids are the same, it could result + * in non-shared {@link org.apache.hadoop.hbase.client.HConnection} + * instances if some of the other connection parameters differ. + */ + public static String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id"; + + /** + * HRegion server lease period in milliseconds. Clients must report in within this period + * else they are considered dead. Unit measured in ms (milliseconds). + */ + public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY = + "hbase.regionserver.lease.period"; + + /** + * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}. + */ + public static long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000; + + /** + * timeout for each RPC + */ + public static String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout"; + + /** + * Default value of {@link #HBASE_RPC_TIMEOUT_KEY} + */ + public static int DEFAULT_HBASE_RPC_TIMEOUT = 60000; + + /* + * cluster replication constants. 
+ */ + public static final String + REPLICATION_ENABLE_KEY = "hbase.replication"; + public static final String + REPLICATION_SOURCE_SERVICE_CLASSNAME = "hbase.replication.source.service"; + public static final String + REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service"; + public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT = + "org.apache.hadoop.hbase.replication.regionserver.Replication"; + + /** HBCK special code name used as server name when manipulating ZK nodes */ + public static final String HBCK_CODE_NAME = "HBCKServerName"; + + public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER = + "hbase.regionserver.hostname.seen.by.master"; + + public static final String HBASE_MASTER_LOGCLEANER_PLUGINS = + "hbase.master.logcleaner.plugins"; + + public static final String HBASE_REGION_SPLIT_POLICY_KEY = + "hbase.regionserver.region.split.policy"; + + /** + * Configuration key for the size of the block cache + */ + public static final String HFILE_BLOCK_CACHE_SIZE_KEY = + "hfile.block.cache.size"; + + public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.25f; + + /* + * Minimum percentage of free heap necessary for a successful cluster startup. + */ + public static final float HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f; + + public static final List HBASE_NON_USER_TABLE_DIRS = new ArrayList( + Arrays.asList(new String[]{ HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME, + CORRUPT_DIR_NAME, Bytes.toString(META_TABLE_NAME), + Bytes.toString(ROOT_TABLE_NAME), SPLIT_LOGDIR_NAME })); + + public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile + ("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); + public static final Pattern CP_HTD_ATTR_VALUE_PATTERN = + Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$"); + + public static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; + public static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; + public static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( + "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); + + /** The delay when re-trying a socket operation in a loop (HBASE-4712) */ + public static final int SOCKET_RETRY_WAIT_MS = 200; + + /** Host name of the local machine */ + public static final String LOCALHOST = "localhost"; + + /** + * If this parameter is set to true, then hbase will read + * data and then verify checksums. Checksum verification + * inside hdfs will be switched off. However, if the hbase-checksum + * verification fails, then it will switch back to using + * hdfs checksums for verifiying data that is being read from storage. + * + * If this parameter is set to false, then hbase will not + * verify any checksums, instead it will depend on checksum verification + * being done in the hdfs client. + */ + public static final String HBASE_CHECKSUM_VERIFICATION = + "hbase.regionserver.checksum.verify"; + + public static final String LOCALHOST_IP = "127.0.0.1"; + + /** Conf key that enables distributed log splitting */ + public static final String DISTRIBUTED_LOG_SPLITTING_KEY = + "hbase.master.distributed.log.splitting"; + + /** + * The name of the configuration parameter that specifies + * the number of bytes in a newly created checksum chunk. 
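+   * <p>For example (illustrative):
+   * <code>conf.setInt(BYTES_PER_CHECKSUM, 16 * 1024);</code>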
+ */ + public static final String BYTES_PER_CHECKSUM = + "hbase.hstore.bytes.per.checksum"; + + /** + * The name of the configuration parameter that specifies + * the name of an algorithm that is used to compute checksums + * for newly created blocks. + */ + public static final String CHECKSUM_TYPE_NAME = + "hbase.hstore.checksum.algorithm"; + + /** Enable file permission modification from standard hbase */ + public static final String ENABLE_DATA_FILE_UMASK = "hbase.data.umask.enable"; + /** File permission umask to use when creating hbase data files */ + public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask"; + + /** Configuration name of HLog Compression */ + public static final String ENABLE_WAL_COMPRESSION = + "hbase.regionserver.wal.enablecompression"; + +/** Region in Transition metrics threshold time */ + public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD="hbase.metrics.rit.stuck.warning.threshold"; + + public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop"; + + /** + * The byte array represents for NO_NEXT_INDEXED_KEY; + * The actual value is irrelevant because this is always compared by reference. + */ + public static final byte [] NO_NEXT_INDEXED_KEY = Bytes.toBytes("NO_NEXT_INDEXED_KEY"); + + private HConstants() { + // Can't be instantiated with this ctor. + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/LargeTests.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/LargeTests.java new file mode 100644 index 0000000..f1b46fa --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/LargeTests.java @@ -0,0 +1,38 @@ +/* + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +/** + * Tag a test as 'large', meaning that the test class has the following + * characteristics: + * - executed in an isolated JVM. Tests can however be executed in different + * JVM on the same machine simultaneously. 
+ * - will not have to be executed by the developer before submitting a bug + * - ideally, last less than 2 minutes to help parallelization + * + * In the worst case compared to small or medium, use it only for tests that + * you cannot put in the other categories + * + * @see SmallTests + * @see MediumTests + */ +public interface LargeTests { } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MediumTests.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MediumTests.java new file mode 100644 index 0000000..bbbde7c --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MediumTests.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +/** + * Tag a test as 'Medium', meaning that the test class has the following + * characteristics: + * - executed in an isolated JVM. Tests can however be executed in different + * JVM on the same machine simultaneously. + * - will have to be executed by the developer before submitting a bug + * - ideally, last less than 1 minute to help parallelization + * + * Use it for tests that cannot be tagged as 'Small'. + * + * @see SmallTests + * @see LargeTests + */ +public interface MediumTests { } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SmallTests.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SmallTests.java new file mode 100644 index 0000000..c702f5a --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SmallTests.java @@ -0,0 +1,34 @@ +/* + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase; + +/** + * Tag a test as 'small', meaning that the test class has the following + * characteristics: + * - can be run simultaneously with other small tests in the same JVM + * - ideally, last less than 15 seconds + * - does not use a cluster + * + * @see MediumTests + * @see LargeTests + */ +public interface SmallTests { +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java new file mode 100644 index 0000000..c9efee1 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.lang.annotation.*; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * A package attribute that captures the version of hbase that was compiled. + * Copied down from hadoop. All is same except name of interface. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.PACKAGE) +@InterfaceAudience.Private +public @interface VersionAnnotation { + + /** + * Get the Hadoop version + * @return the version string "0.6.3-dev" + */ + String version(); + + /** + * Get the username that compiled Hadoop. + */ + String user(); + + /** + * Get the date when Hadoop was compiled. + * @return the date in unix 'date' format + */ + String date(); + + /** + * Get the url for the subversion repository. + */ + String url(); + + /** + * Get the subversion revision. + * @return the revision number as a string (eg. "451451") + */ + String revision(); +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java new file mode 100644 index 0000000..926f12d --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java @@ -0,0 +1,273 @@ +/** + * Copyright 2009 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.io; + +import java.io.IOException; +import java.io.DataInput; +import java.io.DataOutput; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; + +/** + * A byte sequence that is usable as a key or value. Based on + * {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable + * and DOES NOT distinguish between the size of the sequence and the current + * capacity as {@link org.apache.hadoop.io.BytesWritable} does. Hence it is + * comparatively 'immutable'. When creating a new instance of this class, + * the underlying byte [] is not copied, just referenced. The backing + * buffer is accessed when we go to serialize. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ImmutableBytesWritable +implements WritableComparable<ImmutableBytesWritable> { + private byte[] bytes; + private int offset; + private int length; + + /** + * Create a zero-size sequence. + */ + public ImmutableBytesWritable() { + super(); + } + + /** + * Create an ImmutableBytesWritable using the byte array as the initial value. + * @param bytes This array becomes the backing storage for the object. + */ + public ImmutableBytesWritable(byte[] bytes) { + this(bytes, 0, bytes.length); + } + + /** + * Set the new ImmutableBytesWritable to the contents of the passed + * ibw. + * @param ibw the value to set this ImmutableBytesWritable to. + */ + public ImmutableBytesWritable(final ImmutableBytesWritable ibw) { + this(ibw.get(), 0, ibw.getSize()); + } + + /** + * Set the value to a given byte range + * @param bytes the new byte range to set to + * @param offset the offset in newData to start at + * @param length the number of bytes in the range + */ + public ImmutableBytesWritable(final byte[] bytes, final int offset, + final int length) { + this.bytes = bytes; + this.offset = offset; + this.length = length; + } + + /** + * Get the data from the BytesWritable. + * @return The data is only valid between offset and offset+length. + */ + public byte [] get() { + if (this.bytes == null) { + throw new IllegalStateException("Uninitialized. Null constructor " + + "called w/o accompanying readFields invocation"); + } + return this.bytes; + } + + /** + * @param b Use passed bytes as backing array for this instance. + */ + public void set(final byte [] b) { + set(b, 0, b.length); + } + + /** + * @param b Use passed bytes as backing array for this instance. + * @param offset + * @param length + */ + public void set(final byte [] b, final int offset, final int length) { + this.bytes = b; + this.offset = offset; + this.length = length; + } + + /** + * @return the number of valid bytes in the buffer + */ + public int getSize() { + if (this.bytes == null) { + throw new IllegalStateException("Uninitialized. Null constructor " + + "called w/o accompanying readFields invocation"); + } + return this.length; + } + + /** + * @return the number of valid bytes in the buffer + */ + //Should probably deprecate getSize() so that we keep the same calls for all + //byte [] + public int getLength() { + if (this.bytes == null) { + throw new IllegalStateException("Uninitialized. Null constructor " + + "called w/o accompanying readFields invocation"); + } + return this.length; + } + + /** + * @return offset + */ + public int getOffset(){ + return this.offset; + } + + public void readFields(final DataInput in) throws IOException { + this.length = in.readInt(); + this.bytes = new byte[this.length]; + in.readFully(this.bytes, 0, this.length); + this.offset = 0; + } + + public void write(final DataOutput out) throws IOException { + out.writeInt(this.length); + out.write(this.bytes, this.offset, this.length); + } + + // Below methods copied from BytesWritable + @Override + public int hashCode() { + int hash = 1; + for (int i = offset; i < offset + length; i++) + hash = (31 * hash) + (int)bytes[i]; + return hash; + } + + /** + * Define the sort order of the BytesWritable. + * @param that The other bytes writable + * @return Positive if left is bigger than right, 0 if they are equal, and + * negative if left is smaller than right. + */ + public int compareTo(ImmutableBytesWritable that) { + return WritableComparator.compareBytes( + this.bytes, this.offset, this.length, + that.bytes, that.offset, that.length); + } + + /** + * Compares the bytes in this object to the specified byte array + * @param that + * @return Positive if left is bigger than right, 0 if they are equal, and + * negative if left is smaller than right. + */ + public int compareTo(final byte [] that) { + return WritableComparator.compareBytes( + this.bytes, this.offset, this.length, + that, 0, that.length); + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object right_obj) { + if (right_obj instanceof byte []) { + return compareTo((byte [])right_obj) == 0; + } + if (right_obj instanceof ImmutableBytesWritable) { + return compareTo((ImmutableBytesWritable)right_obj) == 0; + } + return false; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(3*this.bytes.length); + for (int idx = offset; idx < offset + length; idx++) { + // if not the first, put a blank separator in + if (idx != offset) { + sb.append(' '); + } + String num = Integer.toHexString(bytes[idx]); + // if it is only one digit, add a leading 0. + if (num.length() < 2) { + sb.append('0'); + } + sb.append(num); + } + return sb.toString(); + } + + /** A Comparator optimized for ImmutableBytesWritable. + */ + public static class Comparator extends WritableComparator { + private BytesWritable.Comparator comparator = + new BytesWritable.Comparator(); + + /** constructor */ + public Comparator() { + super(ImmutableBytesWritable.class); + } + + /** + * @see org.apache.hadoop.io.WritableComparator#compare(byte[], int, int, byte[], int, int) + */ + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + return comparator.compare(b1, s1, l1, b2, s2, l2); + } + } + + static { // register this comparator + WritableComparator.define(ImmutableBytesWritable.class, new Comparator()); + } + + /** + * @param array List of byte []. + * @return Array of byte []. + */ + public static byte [][] toArray(final List<byte []> array) { + // List#toArray doesn't work on lists of byte [].
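+    // (the no-arg toArray() returns Object[], which cannot be cast to
+    // byte[][], so copy element by element instead)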
+ byte[][] results = new byte[array.size()][]; + for (int i = 0; i < array.size(); i++) { + results[i] = array.get(i); + } + return results; + } + + /** + * Returns a copy of the bytes referred to by this writable + */ + public byte[] copyBytes() { + return Arrays.copyOfRange(bytes, offset, offset+length); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java new file mode 100644 index 0000000..33ecb2e --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -0,0 +1,1661 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.lang.reflect.Field; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Comparator; +import java.util.Iterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; + +import sun.misc.Unsafe; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Utility class that handles byte arrays, conversions to/from other types, + * comparisons, hash code generation, manufacturing keys for HashMaps or + * HashSets, etc. 
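+ * <p>An illustrative round trip, not from the original patch:
+ * <pre>
+ *   byte [] b = Bytes.toBytes(42L);
+ *   long v = Bytes.toLong(b);   // 42
+ * </pre>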
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Bytes { + + private static final Log LOG = LogFactory.getLog(Bytes.class); + + /** + * Size of boolean in bytes + */ + public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE; + + /** + * Size of byte in bytes + */ + public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; + + /** + * Size of char in bytes + */ + public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE; + + /** + * Size of double in bytes + */ + public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE; + + /** + * Size of float in bytes + */ + public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE; + + /** + * Size of int in bytes + */ + public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; + + /** + * Size of long in bytes + */ + public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE; + + /** + * Size of short in bytes + */ + public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE; + + + /** + * Estimate of size cost to pay beyond payload in jvm for instance of byte []. + * Estimate based on study of jhat and jprofiler numbers. + */ + // JHat says BU is 56 bytes. + // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?) + public static final int ESTIMATED_HEAP_TAX = 16; + + /** + * Byte array comparator class. + */ + public static class ByteArrayComparator implements RawComparator { + /** + * Constructor + */ + public ByteArrayComparator() { + super(); + } + public int compare(byte [] left, byte [] right) { + return compareTo(left, right); + } + public int compare(byte [] b1, int s1, int l1, byte [] b2, int s2, int l2) { + return LexicographicalComparerHolder.BEST_COMPARER. + compareTo(b1, s1, l1, b2, s2, l2); + } + } + + /** + * Pass this to TreeMaps where byte [] are keys. + */ + public static Comparator BYTES_COMPARATOR = + new ByteArrayComparator(); + + /** + * Use comparing byte arrays, byte-by-byte + */ + public static RawComparator BYTES_RAWCOMPARATOR = + new ByteArrayComparator(); + + /** + * Read byte-array written with a WritableableUtils.vint prefix. + * @param in Input to read from. + * @return byte array read off in + * @throws IOException e + */ + public static byte [] readByteArray(final DataInput in) + throws IOException { + int len = WritableUtils.readVInt(in); + if (len < 0) { + throw new NegativeArraySizeException(Integer.toString(len)); + } + byte [] result = new byte[len]; + in.readFully(result, 0, len); + return result; + } + + /** + * Read byte-array written with a WritableableUtils.vint prefix. + * IOException is converted to a RuntimeException. + * @param in Input to read from. + * @return byte array read off in + */ + public static byte [] readByteArrayThrowsRuntime(final DataInput in) { + try { + return readByteArray(in); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Write byte-array with a WritableableUtils.vint prefix. + * @param out output stream to be written to + * @param b array to write + * @throws IOException e + */ + public static void writeByteArray(final DataOutput out, final byte [] b) + throws IOException { + if(b == null) { + WritableUtils.writeVInt(out, 0); + } else { + writeByteArray(out, b, 0, b.length); + } + } + + /** + * Write byte-array to out with a vint length prefix. 
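+   * <p>For example (illustrative; <code>out</code> is any DataOutput and
+   * <code>payload</code> a placeholder byte []):
+   * <pre>
+   *   Bytes.writeByteArray(out, payload, 0, payload.length);
+   * </pre>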
+ * @param out output stream + * @param b array + * @param offset offset into array + * @param length length past offset + * @throws IOException e + */ + public static void writeByteArray(final DataOutput out, final byte [] b, + final int offset, final int length) + throws IOException { + WritableUtils.writeVInt(out, length); + out.write(b, offset, length); + } + + /** + * Write byte-array from src to tgt with a vint length prefix. + * @param tgt target array + * @param tgtOffset offset into target array + * @param src source array + * @param srcOffset source offset + * @param srcLength source length + * @return New offset in src array. + */ + public static int writeByteArray(final byte [] tgt, final int tgtOffset, + final byte [] src, final int srcOffset, final int srcLength) { + byte [] vint = vintToBytes(srcLength); + System.arraycopy(vint, 0, tgt, tgtOffset, vint.length); + int offset = tgtOffset + vint.length; + System.arraycopy(src, srcOffset, tgt, offset, srcLength); + return offset + srcLength; + } + + /** + * Put bytes at the specified byte array position. + * @param tgtBytes the byte array + * @param tgtOffset position in the array + * @param srcBytes array to write out + * @param srcOffset source offset + * @param srcLength source length + * @return incremented offset + */ + public static int putBytes(byte[] tgtBytes, int tgtOffset, byte[] srcBytes, + int srcOffset, int srcLength) { + System.arraycopy(srcBytes, srcOffset, tgtBytes, tgtOffset, srcLength); + return tgtOffset + srcLength; + } + + /** + * Write a single byte out to the specified byte array position. + * @param bytes the byte array + * @param offset position in the array + * @param b byte to write out + * @return incremented offset + */ + public static int putByte(byte[] bytes, int offset, byte b) { + bytes[offset] = b; + return offset + 1; + } + + /** + * Returns a new byte array, copied from the passed ByteBuffer. + * @param bb A ByteBuffer + * @return the byte array + */ + public static byte[] toBytes(ByteBuffer bb) { + int length = bb.limit(); + byte [] result = new byte[length]; + System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length); + return result; + } + + /** + * @param b Presumed UTF-8 encoded byte array. + * @return String made from b + */ + public static String toString(final byte [] b) { + if (b == null) { + return null; + } + return toString(b, 0, b.length); + } + + /** + * Joins two byte arrays together using a separator. + * @param b1 The first byte array. + * @param sep The separator to use. + * @param b2 The second byte array. + */ + public static String toString(final byte [] b1, + String sep, + final byte [] b2) { + return toString(b1, 0, b1.length) + sep + toString(b2, 0, b2.length); + } + + /** + * This method will convert utf8 encoded bytes into a string. If + * an UnsupportedEncodingException occurs, this method will eat it + * and return null instead. + * + * @param b Presumed UTF-8 encoded byte array. + * @param off offset into array + * @param len length of utf-8 sequence + * @return String made from b or null + */ + public static String toString(final byte [] b, int off, int len) { + if (b == null) { + return null; + } + if (len == 0) { + return ""; + } + try { + return new String(b, off, len, HConstants.UTF8_ENCODING); + } catch (UnsupportedEncodingException e) { + LOG.error("UTF-8 not supported?", e); + return null; + } + } + + /** + * Write a printable representation of a byte array. 
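+   * <p>For example, <code>toStringBinary(new byte [] {0, 'a'})</code> returns
+   * <code>"\x00a"</code>.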
+   *
+   * @param b byte array
+   * @return string
+   * @see #toStringBinary(byte[], int, int)
+   */
+  public static String toStringBinary(final byte [] b) {
+    if (b == null)
+      return "null";
+    return toStringBinary(b, 0, b.length);
+  }
+
+  /**
+   * Converts the given byte buffer, from its array offset to its limit, to
+   * a string. The position and the mark are ignored.
+   *
+   * @param buf a byte buffer
+   * @return a string representation of the buffer's binary contents
+   */
+  public static String toStringBinary(ByteBuffer buf) {
+    if (buf == null)
+      return "null";
+    return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit());
+  }
+
+  /**
+   * Write a printable representation of a byte array. Non-printable
+   * characters are hex escaped in the format \\x%02X, eg:
+   * \x00 \x05 etc
+   *
+   * @param b array to write out
+   * @param off offset to start at
+   * @param len length to write
+   * @return string output
+   */
+  public static String toStringBinary(final byte [] b, int off, int len) {
+    StringBuilder result = new StringBuilder();
+    // Just in case we are passed a 'len' that is > buffer length...
+    if (off >= b.length) return result.toString();
+    if (off + len > b.length) len = b.length - off;
+    try {
+      String first = new String(b, off, len, "ISO-8859-1");
+      for (int i = 0; i < first.length() ; ++i ) {
+        int ch = first.charAt(i) & 0xFF;
+        if ( (ch >= '0' && ch <= '9')
+            || (ch >= 'A' && ch <= 'Z')
+            || (ch >= 'a' && ch <= 'z')
+            || " `~!@#$%^&*()-_=+[]{}\\|;:'\",.<>/?".indexOf(ch) >= 0 ) {
+          result.append(first.charAt(i));
+        } else {
+          result.append(String.format("\\x%02X", ch));
+        }
+      }
+    } catch (UnsupportedEncodingException e) {
+      LOG.error("ISO-8859-1 not supported?", e);
+    }
+    return result.toString();
+  }
+
+  private static boolean isHexDigit(char c) {
+    return
+        (c >= 'A' && c <= 'F') ||
+        (c >= '0' && c <= '9');
+  }
+
+  /**
+   * Takes an ASCII digit in the range A-F0-9 and returns
+   * the corresponding integer/ordinal value.
+   * @param ch The hex digit.
+   * @return The converted hex value as a byte.
+   */
+  public static byte toBinaryFromHex(byte ch) {
+    if ( ch >= 'A' && ch <= 'F' )
+      return (byte) ((byte)10 + (byte) (ch - 'A'));
+    // else
+    return (byte) (ch - '0');
+  }
+
+  /**
+   * Converts a string with \xNN hex escapes (as written by
+   * {@link #toStringBinary(byte[])}) back into a byte array.
+   * @param in the escaped string
+   * @return the byte array
+   */
+  public static byte [] toBytesBinary(String in) {
+    // this may be bigger than we need, but lets be safe.
+    byte [] b = new byte[in.length()];
+    int size = 0;
+    for (int i = 0; i < in.length(); ++i) {
+      char ch = in.charAt(i);
+      if (ch == '\\') {
+        // begin hex escape:
+        char next = in.charAt(i+1);
+        if (next != 'x') {
+          // invalid escape sequence, ignore this one.
+          b[size++] = (byte)ch;
+          continue;
+        }
+        // ok, take next 2 hex digits.
+        char hd1 = in.charAt(i+2);
+        char hd2 = in.charAt(i+3);
+
+        // they need to be A-F0-9:
+        if (!isHexDigit(hd1) ||
+            !isHexDigit(hd2)) {
+          // bogus escape code, ignore:
+          continue;
+        }
+        // turn hex ASCII digit -> number
+        byte d = (byte) ((toBinaryFromHex((byte)hd1) << 4) + toBinaryFromHex((byte)hd2));
+
+        b[size++] = d;
+        i += 3; // skip 3
+      } else {
+        b[size++] = (byte) ch;
+      }
+    }
+    // resize:
+    byte [] b2 = new byte[size];
+    System.arraycopy(b, 0, b2, 0, size);
+    return b2;
+  }
+
+  /**
+   * Converts a string to a UTF-8 byte array.
+   * @param s string
+   * @return the byte array
+   */
+  public static byte[] toBytes(String s) {
+    try {
+      return s.getBytes(HConstants.UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      LOG.error("UTF-8 not supported?", e);
+      return null;
+    }
+  }
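toStringBinary and toBytesBinary are inverses for arbitrary binary data: printable ASCII passes through unchanged and everything else becomes a \xNN escape, which is the form HBase typically uses when rendering row keys. A quick illustration, again not part of the patch; BinaryEscapeDemo is a made-up name:

  import org.apache.hadoop.hbase.util.Bytes;

  public class BinaryEscapeDemo {
    public static void main(String[] args) {
      byte[] raw = new byte[] { 'k', 'e', 'y', 0x00, (byte) 0xFF };
      String printable = Bytes.toStringBinary(raw);
      System.out.println(printable);               // prints: key\x00\xFF
      byte[] back = Bytes.toBytesBinary(printable);
      System.out.println(Bytes.equals(raw, back)); // true
    }
  }

+
+  /**
+   * Convert a boolean to a byte array. True becomes -1
+   * and false becomes 0.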
+ * + * @param b value + * @return b encoded in a byte array. + */ + public static byte [] toBytes(final boolean b) { + return new byte[] { b ? (byte) -1 : (byte) 0 }; + } + + /** + * Reverses {@link #toBytes(boolean)} + * @param b array + * @return True or false. + */ + public static boolean toBoolean(final byte [] b) { + if (b.length != 1) { + throw new IllegalArgumentException("Array has wrong size: " + b.length); + } + return b[0] != (byte) 0; + } + + /** + * Convert a long value to a byte array using big-endian. + * + * @param val value to convert + * @return the byte array + */ + public static byte[] toBytes(long val) { + byte [] b = new byte[8]; + for (int i = 7; i > 0; i--) { + b[i] = (byte) val; + val >>>= 8; + } + b[0] = (byte) val; + return b; + } + + /** + * Converts a byte array to a long value. Reverses + * {@link #toBytes(long)} + * @param bytes array + * @return the long value + */ + public static long toLong(byte[] bytes) { + return toLong(bytes, 0, SIZEOF_LONG); + } + + /** + * Converts a byte array to a long value. Assumes there will be + * {@link #SIZEOF_LONG} bytes available. + * + * @param bytes bytes + * @param offset offset + * @return the long value + */ + public static long toLong(byte[] bytes, int offset) { + return toLong(bytes, offset, SIZEOF_LONG); + } + + /** + * Converts a byte array to a long value. + * + * @param bytes array of bytes + * @param offset offset into array + * @param length length of data (must be {@link #SIZEOF_LONG}) + * @return the long value + * @throws IllegalArgumentException if length is not {@link #SIZEOF_LONG} or + * if there's not enough room in the array at the offset indicated. + */ + public static long toLong(byte[] bytes, int offset, final int length) { + if (length != SIZEOF_LONG || offset + length > bytes.length) { + throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG); + } + long l = 0; + for(int i = offset; i < offset + length; i++) { + l <<= 8; + l ^= bytes[i] & 0xFF; + } + return l; + } + + private static IllegalArgumentException + explainWrongLengthOrOffset(final byte[] bytes, + final int offset, + final int length, + final int expectedLength) { + String reason; + if (length != expectedLength) { + reason = "Wrong length: " + length + ", expected " + expectedLength; + } else { + reason = "offset (" + offset + ") + length (" + length + ") exceed the" + + " capacity of the array: " + bytes.length; + } + return new IllegalArgumentException(reason); + } + + /** + * Put a long value out to the specified byte array position. + * @param bytes the byte array + * @param offset position in the array + * @param val long to write out + * @return incremented offset + * @throws IllegalArgumentException if the byte array given doesn't have + * enough room at the offset specified. + */ + public static int putLong(byte[] bytes, int offset, long val) { + if (bytes.length - offset < SIZEOF_LONG) { + throw new IllegalArgumentException("Not enough room to put a long at" + + " offset " + offset + " in a " + bytes.length + " byte array"); + } + for(int i = offset + 7; i > offset; i--) { + bytes[i] = (byte) val; + val >>>= 8; + } + bytes[offset] = (byte) val; + return offset + SIZEOF_LONG; + } + + /** + * Presumes float encoded as IEEE 754 floating-point "single format" + * @param bytes byte array + * @return Float made from passed byte array. 
+ */ + public static float toFloat(byte [] bytes) { + return toFloat(bytes, 0); + } + + /** + * Presumes float encoded as IEEE 754 floating-point "single format" + * @param bytes array to convert + * @param offset offset into array + * @return Float made from passed byte array. + */ + public static float toFloat(byte [] bytes, int offset) { + return Float.intBitsToFloat(toInt(bytes, offset, SIZEOF_INT)); + } + + /** + * @param bytes byte array + * @param offset offset to write to + * @param f float value + * @return New offset in bytes + */ + public static int putFloat(byte [] bytes, int offset, float f) { + return putInt(bytes, offset, Float.floatToRawIntBits(f)); + } + + /** + * @param f float value + * @return the float represented as byte [] + */ + public static byte [] toBytes(final float f) { + // Encode it as int + return Bytes.toBytes(Float.floatToRawIntBits(f)); + } + + /** + * @param bytes byte array + * @return Return double made from passed bytes. + */ + public static double toDouble(final byte [] bytes) { + return toDouble(bytes, 0); + } + + /** + * @param bytes byte array + * @param offset offset where double is + * @return Return double made from passed bytes. + */ + public static double toDouble(final byte [] bytes, final int offset) { + return Double.longBitsToDouble(toLong(bytes, offset, SIZEOF_LONG)); + } + + /** + * @param bytes byte array + * @param offset offset to write to + * @param d value + * @return New offset into array bytes + */ + public static int putDouble(byte [] bytes, int offset, double d) { + return putLong(bytes, offset, Double.doubleToLongBits(d)); + } + + /** + * Serialize a double as the IEEE 754 double format output. The resultant + * array will be 8 bytes long. + * + * @param d value + * @return the double represented as byte [] + */ + public static byte [] toBytes(final double d) { + // Encode it as a long + return Bytes.toBytes(Double.doubleToRawLongBits(d)); + } + + /** + * Convert an int value to a byte array + * @param val value + * @return the byte array + */ + public static byte[] toBytes(int val) { + byte [] b = new byte[4]; + for(int i = 3; i > 0; i--) { + b[i] = (byte) val; + val >>>= 8; + } + b[0] = (byte) val; + return b; + } + + /** + * Converts a byte array to an int value + * @param bytes byte array + * @return the int value + */ + public static int toInt(byte[] bytes) { + return toInt(bytes, 0, SIZEOF_INT); + } + + /** + * Converts a byte array to an int value + * @param bytes byte array + * @param offset offset into array + * @return the int value + */ + public static int toInt(byte[] bytes, int offset) { + return toInt(bytes, offset, SIZEOF_INT); + } + + /** + * Converts a byte array to an int value + * @param bytes byte array + * @param offset offset into array + * @param length length of int (has to be {@link #SIZEOF_INT}) + * @return the int value + * @throws IllegalArgumentException if length is not {@link #SIZEOF_INT} or + * if there's not enough room in the array at the offset indicated. + */ + public static int toInt(byte[] bytes, int offset, final int length) { + if (length != SIZEOF_INT || offset + length > bytes.length) { + throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT); + } + int n = 0; + for(int i = offset; i < (offset + length); i++) { + n <<= 8; + n ^= bytes[i] & 0xFF; + } + return n; + } + + /** + * Put an int value out to the specified byte array position. 
+ * @param bytes the byte array + * @param offset position in the array + * @param val int to write out + * @return incremented offset + * @throws IllegalArgumentException if the byte array given doesn't have + * enough room at the offset specified. + */ + public static int putInt(byte[] bytes, int offset, int val) { + if (bytes.length - offset < SIZEOF_INT) { + throw new IllegalArgumentException("Not enough room to put an int at" + + " offset " + offset + " in a " + bytes.length + " byte array"); + } + for(int i= offset + 3; i > offset; i--) { + bytes[i] = (byte) val; + val >>>= 8; + } + bytes[offset] = (byte) val; + return offset + SIZEOF_INT; + } + + /** + * Convert a short value to a byte array of {@link #SIZEOF_SHORT} bytes long. + * @param val value + * @return the byte array + */ + public static byte[] toBytes(short val) { + byte[] b = new byte[SIZEOF_SHORT]; + b[1] = (byte) val; + val >>= 8; + b[0] = (byte) val; + return b; + } + + /** + * Converts a byte array to a short value + * @param bytes byte array + * @return the short value + */ + public static short toShort(byte[] bytes) { + return toShort(bytes, 0, SIZEOF_SHORT); + } + + /** + * Converts a byte array to a short value + * @param bytes byte array + * @param offset offset into array + * @return the short value + */ + public static short toShort(byte[] bytes, int offset) { + return toShort(bytes, offset, SIZEOF_SHORT); + } + + /** + * Converts a byte array to a short value + * @param bytes byte array + * @param offset offset into array + * @param length length, has to be {@link #SIZEOF_SHORT} + * @return the short value + * @throws IllegalArgumentException if length is not {@link #SIZEOF_SHORT} + * or if there's not enough room in the array at the offset indicated. + */ + public static short toShort(byte[] bytes, int offset, final int length) { + if (length != SIZEOF_SHORT || offset + length > bytes.length) { + throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT); + } + short n = 0; + n ^= bytes[offset] & 0xFF; + n <<= 8; + n ^= bytes[offset+1] & 0xFF; + return n; + } + + /** + * This method will get a sequence of bytes from pos -> limit, + * but will restore pos after. + * @param buf + * @return byte array + */ + public static byte[] getBytes(ByteBuffer buf) { + int savedPos = buf.position(); + byte [] newBytes = new byte[buf.remaining()]; + buf.get(newBytes); + buf.position(savedPos); + return newBytes; + } + + /** + * Put a short value out to the specified byte array position. + * @param bytes the byte array + * @param offset position in the array + * @param val short to write out + * @return incremented offset + * @throws IllegalArgumentException if the byte array given doesn't have + * enough room at the offset specified. 
+   */
+  public static int putShort(byte[] bytes, int offset, short val) {
+    if (bytes.length - offset < SIZEOF_SHORT) {
+      throw new IllegalArgumentException("Not enough room to put a short at"
+          + " offset " + offset + " in a " + bytes.length + " byte array");
+    }
+    bytes[offset+1] = (byte) val;
+    val >>= 8;
+    bytes[offset] = (byte) val;
+    return offset + SIZEOF_SHORT;
+  }
+
+  /**
+   * Convert a BigDecimal value to a byte array
+   *
+   * @param val
+   * @return the byte array
+   */
+  public static byte[] toBytes(BigDecimal val) {
+    byte[] valueBytes = val.unscaledValue().toByteArray();
+    byte[] result = new byte[valueBytes.length + SIZEOF_INT];
+    int offset = putInt(result, 0, val.scale());
+    putBytes(result, offset, valueBytes, 0, valueBytes.length);
+    return result;
+  }
+
+
+  /**
+   * Converts a byte array to a BigDecimal
+   *
+   * @param bytes
+   * @return the BigDecimal value
+   */
+  public static BigDecimal toBigDecimal(byte[] bytes) {
+    return toBigDecimal(bytes, 0, bytes.length);
+  }
+
+  /**
+   * Converts a byte array to a BigDecimal value
+   *
+   * @param bytes
+   * @param offset
+   * @param length
+   * @return the BigDecimal value
+   */
+  public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) {
+    if (bytes == null || length < SIZEOF_INT + 1 ||
+        (offset + length > bytes.length)) {
+      return null;
+    }
+
+    int scale = toInt(bytes, offset);
+    byte[] tcBytes = new byte[length - SIZEOF_INT];
+    System.arraycopy(bytes, offset + SIZEOF_INT, tcBytes, 0, length - SIZEOF_INT);
+    return new BigDecimal(new BigInteger(tcBytes), scale);
+  }
+
+  /**
+   * Put a BigDecimal value out to the specified byte array position.
+   *
+   * @param bytes the byte array
+   * @param offset position in the array
+   * @param val BigDecimal to write out
+   * @return incremented offset
+   */
+  public static int putBigDecimal(byte[] bytes, int offset, BigDecimal val) {
+    if (bytes == null) {
+      return offset;
+    }
+
+    byte[] valueBytes = val.unscaledValue().toByteArray();
+    byte[] result = new byte[valueBytes.length + SIZEOF_INT];
+    offset = putInt(result, offset, val.scale());
+    return putBytes(result, offset, valueBytes, 0, valueBytes.length);
+  }
+
+  /**
+   * @param vint Integer to make a vint of.
+   * @return Vint as bytes array.
+   */
+  public static byte [] vintToBytes(final long vint) {
+    long i = vint;
+    int size = WritableUtils.getVIntSize(i);
+    byte [] result = new byte[size];
+    int offset = 0;
+    if (i >= -112 && i <= 127) {
+      result[offset] = (byte) i;
+      return result;
+    }
+
+    int len = -112;
+    if (i < 0) {
+      i ^= -1L; // take one's complement
+      len = -120;
+    }
+
+    long tmp = i;
+    while (tmp != 0) {
+      tmp = tmp >> 8;
+      len--;
+    }
+
+    result[offset++] = (byte) len;
+
+    len = (len < -120) ? -(len + 120) : -(len + 112);
+
+    for (int idx = len; idx != 0; idx--) {
+      int shiftbits = (idx - 1) * 8;
+      long mask = 0xFFL << shiftbits;
+      result[offset++] = (byte)((i & mask) >> shiftbits);
+    }
+    return result;
+  }
+
+  /**
+   * @param buffer buffer to convert
+   * @return vint bytes as a long.
+   */
+  public static long bytesToVint(final byte [] buffer) {
+    int offset = 0;
+    byte firstByte = buffer[offset++];
+    int len = WritableUtils.decodeVIntSize(firstByte);
+    if (len == 1) {
+      return firstByte;
+    }
+    long i = 0;
+    for (int idx = 0; idx < len-1; idx++) {
+      byte b = buffer[offset++];
+      i = i << 8;
+      i = i | (b & 0xFF);
+    }
+    return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
+  }
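The BigDecimal codec above stores the scale as a four-byte int followed by the two's-complement unscaled value, so values round-trip at any precision. A short sketch, illustrative only; BigDecimalRoundTrip is a hypothetical name:

  import java.math.BigDecimal;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BigDecimalRoundTrip {
    public static void main(String[] args) {
      BigDecimal price = new BigDecimal("19.99");
      byte[] encoded = Bytes.toBytes(price);    // [scale int][unscaled bytes]
      BigDecimal back = Bytes.toBigDecimal(encoded);
      System.out.println(price.equals(back));   // true
    }
  }

+
+  /**
+   * Reads a zero-compressed encoded long from input stream and returns it.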
+   * @param buffer Binary array
+   * @param offset Offset into array at which vint begins.
+   * @throws java.io.IOException e
+   * @return deserialized long from stream.
+   */
+  public static long readVLong(final byte [] buffer, final int offset)
+  throws IOException {
+    byte firstByte = buffer[offset];
+    int len = WritableUtils.decodeVIntSize(firstByte);
+    if (len == 1) {
+      return firstByte;
+    }
+    long i = 0;
+    for (int idx = 0; idx < len-1; idx++) {
+      byte b = buffer[offset + 1 + idx];
+      i = i << 8;
+      i = i | (b & 0xFF);
+    }
+    return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
+  }
+
+  /**
+   * @param left left operand
+   * @param right right operand
+   * @return 0 if equal, < 0 if left is less than right, etc.
+   */
+  public static int compareTo(final byte [] left, final byte [] right) {
+    return LexicographicalComparerHolder.BEST_COMPARER.
+      compareTo(left, 0, left.length, right, 0, right.length);
+  }
+
+  /**
+   * Lexicographically compare two arrays.
+   *
+   * @param buffer1 left operand
+   * @param buffer2 right operand
+   * @param offset1 Where to start comparing in the left buffer
+   * @param offset2 Where to start comparing in the right buffer
+   * @param length1 How much to compare from the left buffer
+   * @param length2 How much to compare from the right buffer
+   * @return 0 if equal, < 0 if left is less than right, etc.
+   */
+  public static int compareTo(byte[] buffer1, int offset1, int length1,
+      byte[] buffer2, int offset2, int length2) {
+    return LexicographicalComparerHolder.BEST_COMPARER.
+      compareTo(buffer1, offset1, length1, buffer2, offset2, length2);
+  }
+
+  interface Comparer<T> {
+    abstract public int compareTo(T buffer1, int offset1, int length1,
+        T buffer2, int offset2, int length2);
+  }
+
+  @VisibleForTesting
+  static Comparer<byte[]> lexicographicalComparerJavaImpl() {
+    return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
+  }
+
+  /**
+   * Provides a lexicographical comparer implementation; either a Java
+   * implementation or a faster implementation based on {@link Unsafe}.
+   *
+   * <p>Uses reflection to gracefully fall back to the Java implementation if
+   * {@code Unsafe} isn't available.
+   */
+  @VisibleForTesting
+  static class LexicographicalComparerHolder {
+    static final String UNSAFE_COMPARER_NAME =
+        LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
+
+    static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
+    /**
+     * Returns the Unsafe-using Comparer, or falls back to the pure-Java
+     * implementation if unable to do so.
+     */
+    static Comparer<byte[]> getBestComparer() {
+      try {
+        Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
+
+        // yes, UnsafeComparer does implement Comparer<byte[]>
+        @SuppressWarnings("unchecked")
+        Comparer<byte[]> comparer =
+            (Comparer<byte[]>) theClass.getEnumConstants()[0];
+        return comparer;
+      } catch (Throwable t) { // ensure we really catch *everything*
+        return lexicographicalComparerJavaImpl();
+      }
+    }
+
+    enum PureJavaComparer implements Comparer<byte[]> {
+      INSTANCE;
+
+      @Override
+      public int compareTo(byte[] buffer1, int offset1, int length1,
+          byte[] buffer2, int offset2, int length2) {
+        // Short circuit equal case
+        if (buffer1 == buffer2 &&
+            offset1 == offset2 &&
+            length1 == length2) {
+          return 0;
+        }
+        // Bring WritableComparator code local
+        int end1 = offset1 + length1;
+        int end2 = offset2 + length2;
+        for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
+          int a = (buffer1[i] & 0xff);
+          int b = (buffer2[j] & 0xff);
+          if (a != b) {
+            return a - b;
+          }
+        }
+        return length1 - length2;
+      }
+    }
+
+    @VisibleForTesting
+    enum UnsafeComparer implements Comparer<byte[]> {
+      INSTANCE;
+
+      static final Unsafe theUnsafe;
+
+      /** The offset to the first element in a byte array. */
+      static final int BYTE_ARRAY_BASE_OFFSET;
+
+      static {
+        theUnsafe = (Unsafe) AccessController.doPrivileged(
+            new PrivilegedAction<Object>() {
+              @Override
+              public Object run() {
+                try {
+                  Field f = Unsafe.class.getDeclaredField("theUnsafe");
+                  f.setAccessible(true);
+                  return f.get(null);
+                } catch (NoSuchFieldException e) {
+                  // It doesn't matter what we throw;
+                  // it's swallowed in getBestComparer().
+                  throw new Error();
+                } catch (IllegalAccessException e) {
+                  throw new Error();
+                }
+              }
+            });
+
+        BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
+
+        // sanity check - this should never fail
+        if (theUnsafe.arrayIndexScale(byte[].class) != 1) {
+          throw new AssertionError();
+        }
+      }
+
+      static final boolean littleEndian =
+          ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
+
+      /**
+       * Returns true if x1 is less than x2, when both values are treated as
+       * unsigned.
+       */
+      static boolean lessThanUnsigned(long x1, long x2) {
+        return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
+      }
+
+      /**
+       * Lexicographically compare two arrays.
+       *
+       * @param buffer1 left operand
+       * @param buffer2 right operand
+       * @param offset1 Where to start comparing in the left buffer
+       * @param offset2 Where to start comparing in the right buffer
+       * @param length1 How much to compare from the left buffer
+       * @param length2 How much to compare from the right buffer
+       * @return 0 if equal, < 0 if left is less than right, etc.
+ */ + @Override + public int compareTo(byte[] buffer1, int offset1, int length1, + byte[] buffer2, int offset2, int length2) { + // Short circuit equal case + if (buffer1 == buffer2 && + offset1 == offset2 && + length1 == length2) { + return 0; + } + int minLength = Math.min(length1, length2); + int minWords = minLength / SIZEOF_LONG; + int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; + int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; + + /* + * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a + * time is no slower than comparing 4 bytes at a time even on 32-bit. + * On the other hand, it is substantially faster on 64-bit. + */ + for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) { + long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i); + long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i); + long diff = lw ^ rw; + + if (diff != 0) { + if (!littleEndian) { + return lessThanUnsigned(lw, rw) ? -1 : 1; + } + + // Use binary search + int n = 0; + int y; + int x = (int) diff; + if (x == 0) { + x = (int) (diff >>> 32); + n = 32; + } + + y = x << 16; + if (y == 0) { + n += 16; + } else { + x = y; + } + + y = x << 8; + if (y == 0) { + n += 8; + } + return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); + } + } + + // The epilogue to cover the last (minLength % 8) elements. + for (int i = minWords * SIZEOF_LONG; i < minLength; i++) { + int a = (buffer1[offset1 + i] & 0xff); + int b = (buffer2[offset2 + i] & 0xff); + if (a != b) { + return a - b; + } + } + return length1 - length2; + } + } + } + + /** + * @param left left operand + * @param right right operand + * @return True if equal + */ + public static boolean equals(final byte [] left, final byte [] right) { + // Could use Arrays.equals? + //noinspection SimplifiableConditionalExpression + if (left == right) return true; + if (left == null || right == null) return false; + if (left.length != right.length) return false; + if (left.length == 0) return true; + + // Since we're often comparing adjacent sorted data, + // it's usual to have equal arrays except for the very last byte + // so check that first + if (left[left.length - 1] != right[right.length - 1]) return false; + + return compareTo(left, right) == 0; + } + + public static boolean equals(final byte[] left, int leftOffset, int leftLen, + final byte[] right, int rightOffset, int rightLen) { + // short circuit case + if (left == right && + leftOffset == rightOffset && + leftLen == rightLen) { + return true; + } + // different lengths fast check + if (leftLen != rightLen) { + return false; + } + if (leftLen == 0) { + return true; + } + + // Since we're often comparing adjacent sorted data, + // it's usual to have equal arrays except for the very last byte + // so check that first + if (left[leftOffset + leftLen - 1] != right[rightOffset + rightLen - 1]) return false; + + return LexicographicalComparerHolder.BEST_COMPARER. + compareTo(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0; + } + + + /** + * Return true if the byte array on the right is a prefix of the byte + * array on the left. + */ + public static boolean startsWith(byte[] bytes, byte[] prefix) { + return bytes != null && prefix != null && + bytes.length >= prefix.length && + LexicographicalComparerHolder.BEST_COMPARER. + compareTo(bytes, 0, prefix.length, prefix, 0, prefix.length) == 0; + } + + /** + * @param b bytes to hash + * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the + * passed in array. 
This method is what {@link org.apache.hadoop.io.Text} and + * {@link ImmutableBytesWritable} use calculating hash code. + */ + public static int hashCode(final byte [] b) { + return hashCode(b, b.length); + } + + /** + * @param b value + * @param length length of the value + * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the + * passed in array. This method is what {@link org.apache.hadoop.io.Text} and + * {@link ImmutableBytesWritable} use calculating hash code. + */ + public static int hashCode(final byte [] b, final int length) { + return WritableComparator.hashBytes(b, length); + } + + /** + * @param b bytes to hash + * @return A hash of b as an Integer that can be used as key in + * Maps. + */ + public static Integer mapKey(final byte [] b) { + return hashCode(b); + } + + /** + * @param b bytes to hash + * @param length length to hash + * @return A hash of b as an Integer that can be used as key in + * Maps. + */ + public static Integer mapKey(final byte [] b, final int length) { + return hashCode(b, length); + } + + /** + * @param a lower half + * @param b upper half + * @return New array that has a in lower half and b in upper half. + */ + public static byte [] add(final byte [] a, final byte [] b) { + return add(a, b, HConstants.EMPTY_BYTE_ARRAY); + } + + /** + * @param a first third + * @param b second third + * @param c third third + * @return New array made from a, b and c + */ + public static byte [] add(final byte [] a, final byte [] b, final byte [] c) { + byte [] result = new byte[a.length + b.length + c.length]; + System.arraycopy(a, 0, result, 0, a.length); + System.arraycopy(b, 0, result, a.length, b.length); + System.arraycopy(c, 0, result, a.length + b.length, c.length); + return result; + } + + /** + * @param a array + * @param length amount of bytes to grab + * @return First length bytes from a + */ + public static byte [] head(final byte [] a, final int length) { + if (a.length < length) { + return null; + } + byte [] result = new byte[length]; + System.arraycopy(a, 0, result, 0, length); + return result; + } + + /** + * @param a array + * @param length amount of bytes to snarf + * @return Last length bytes from a + */ + public static byte [] tail(final byte [] a, final int length) { + if (a.length < length) { + return null; + } + byte [] result = new byte[length]; + System.arraycopy(a, a.length - length, result, 0, length); + return result; + } + + /** + * @param a array + * @param length new array size + * @return Value in a plus length prepended 0 bytes + */ + public static byte [] padHead(final byte [] a, final int length) { + byte [] padding = new byte[length]; + for (int i = 0; i < length; i++) { + padding[i] = 0; + } + return add(padding,a); + } + + /** + * @param a array + * @param length new array size + * @return Value in a plus length appended 0 bytes + */ + public static byte [] padTail(final byte [] a, final int length) { + byte [] padding = new byte[length]; + for (int i = 0; i < length; i++) { + padding[i] = 0; + } + return add(a,padding); + } + + /** + * Split passed range. Expensive operation relatively. Uses BigInteger math. + * Useful splitting ranges for MapReduce jobs. + * @param a Beginning of range + * @param b End of range + * @param num Number of times to split range. Pass 1 if you want to split + * the range in two; i.e. one split. 
+   * @return Array of dividing values
+   */
+  public static byte [][] split(final byte [] a, final byte [] b, final int num) {
+    return split(a, b, false, num);
+  }
+
+  /**
+   * Split passed range. Expensive operation relatively. Uses BigInteger math.
+   * Useful splitting ranges for MapReduce jobs.
+   * @param a Beginning of range
+   * @param b End of range
+   * @param inclusive Whether the end of range is prefix-inclusive or is
+   * considered an exclusive boundary. Automatic splits are generally exclusive
+   * and manual splits with an explicit range utilize an inclusive end of range.
+   * @param num Number of times to split range. Pass 1 if you want to split
+   * the range in two; i.e. one split.
+   * @return Array of dividing values
+   */
+  public static byte[][] split(final byte[] a, final byte[] b,
+      boolean inclusive, final int num) {
+    byte[][] ret = new byte[num + 2][];
+    int i = 0;
+    Iterable<byte[]> iter = iterateOnSplits(a, b, inclusive, num);
+    if (iter == null)
+      return null;
+    for (byte[] elem : iter) {
+      ret[i++] = elem;
+    }
+    return ret;
+  }
+
+  /**
+   * Iterate over keys within the passed range, splitting at an [a,b) boundary.
+   */
+  public static Iterable<byte[]> iterateOnSplits(final byte[] a,
+      final byte[] b, final int num)
+  {
+    return iterateOnSplits(a, b, false, num);
+  }
+
+  /**
+   * Iterate over keys within the passed range.
+   */
+  public static Iterable<byte[]> iterateOnSplits(
+      final byte[] a, final byte[] b, boolean inclusive, final int num)
+  {
+    byte [] aPadded;
+    byte [] bPadded;
+    if (a.length < b.length) {
+      aPadded = padTail(a, b.length - a.length);
+      bPadded = b;
+    } else if (b.length < a.length) {
+      aPadded = a;
+      bPadded = padTail(b, a.length - b.length);
+    } else {
+      aPadded = a;
+      bPadded = b;
+    }
+    if (compareTo(aPadded,bPadded) >= 0) {
+      throw new IllegalArgumentException("b <= a");
+    }
+    if (num <= 0) {
+      throw new IllegalArgumentException("num cannot be < 0");
+    }
+    byte [] prependHeader = {1, 0};
+    final BigInteger startBI = new BigInteger(add(prependHeader, aPadded));
+    final BigInteger stopBI = new BigInteger(add(prependHeader, bPadded));
+    BigInteger diffBI = stopBI.subtract(startBI);
+    if (inclusive) {
+      diffBI = diffBI.add(BigInteger.ONE);
+    }
+    final BigInteger splitsBI = BigInteger.valueOf(num + 1);
+    if(diffBI.compareTo(splitsBI) < 0) {
+      return null;
+    }
+    final BigInteger intervalBI;
+    try {
+      intervalBI = diffBI.divide(splitsBI);
+    } catch(Exception e) {
+      LOG.error("Exception caught during division", e);
+      return null;
+    }
+
+    final Iterator<byte[]> iterator = new Iterator<byte[]>() {
+      private int i = -1;
+
+      @Override
+      public boolean hasNext() {
+        return i < num+1;
+      }
+
+      @Override
+      public byte[] next() {
+        i++;
+        if (i == 0) return a;
+        if (i == num + 1) return b;
+
+        BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i)));
+        byte [] padded = curBI.toByteArray();
+        if (padded[1] == 0)
+          padded = tail(padded, padded.length - 2);
+        else
+          padded = tail(padded, padded.length - 1);
+        return padded;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+
+    };
+
+    return new Iterable<byte[]>() {
+      @Override
+      public Iterator<byte[]> iterator() {
+        return iterator;
+      }
+    };
+  }
+
+  /**
+   * @param bytes array to hash
+   * @param offset offset to start from
+   * @param length length to hash
+   * */
+  public static int hashCode(byte[] bytes, int offset, int length) {
+    int hash = 1;
+    for (int i = offset; i < offset + length; i++)
+      hash = (31 * hash) + (int) bytes[i];
+    return hash;
+  }
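split and iterateOnSplits treat the padded endpoint keys as big unsigned integers and divide the gap evenly, much as pre-split points for regions are computed. A tiny sketch, illustrative only; SplitDemo is a made-up name:

  import org.apache.hadoop.hbase.util.Bytes;

  public class SplitDemo {
    public static void main(String[] args) {
      byte[] start = Bytes.toBytes("aaa");
      byte[] end = Bytes.toBytes("zzz");
      // One split => three values back: start, the midpoint, end.
      byte[][] points = Bytes.split(start, end, 1);
      for (byte[] p : points) {
        System.out.println(Bytes.toStringBinary(p));
      }
    }
  }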
+  /**
+   * @param t operands
+   * @return Array of byte arrays made from passed array of Text
+   */
+  public static byte [][] toByteArrays(final String [] t) {
+    byte [][] result = new byte[t.length][];
+    for (int i = 0; i < t.length; i++) {
+      result[i] = Bytes.toBytes(t[i]);
+    }
+    return result;
+  }
+
+  /**
+   * @param column operand
+   * @return A byte array of a byte array where first and only entry is
+   * column
+   */
+  public static byte [][] toByteArrays(final String column) {
+    return toByteArrays(toBytes(column));
+  }
+
+  /**
+   * @param column operand
+   * @return A byte array of a byte array where first and only entry is
+   * column
+   */
+  public static byte [][] toByteArrays(final byte [] column) {
+    byte [][] result = new byte[1][];
+    result[0] = column;
+    return result;
+  }
+
+  /**
+   * Binary search for keys in indexes.
+   *
+   * @param arr array of byte arrays to search for
+   * @param key the key you want to find
+   * @param offset the offset in the key you want to find
+   * @param length the length of the key
+   * @param comparator a comparator to compare.
+   * @return zero-based index of the key, if the key is present in the array.
+   *         Otherwise, a value -(i + 1) such that the key is between arr[i -
+   *         1] and arr[i] non-inclusively, where i is in [0, N], if we define
+   *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
+   *         means that this function can return 2N + 1 different values
+   *         ranging from -(N + 1) to N - 1.
+   */
+  public static int binarySearch(byte [][]arr, byte []key, int offset,
+      int length, RawComparator<byte []> comparator) {
+    int low = 0;
+    int high = arr.length - 1;
+
+    while (low <= high) {
+      int mid = (low+high) >>> 1;
+      // we have to compare in this order, because the comparator order
+      // has special logic when the 'left side' is a special key.
+      int cmp = comparator.compare(key, offset, length,
+          arr[mid], 0, arr[mid].length);
+      // key lives above the midpoint
+      if (cmp > 0)
+        low = mid + 1;
+      // key lives below the midpoint
+      else if (cmp < 0)
+        high = mid - 1;
+      // BAM. how often does this really happen?
+      else
+        return mid;
+    }
+    return - (low+1);
+  }
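binarySearch follows the java.util.Arrays convention: a hit returns the index and a miss returns -(insertionPoint + 1), so callers can recover where the key would land. A small sketch against a sorted array, illustrative only; IndexLookup is an invented name:

  import org.apache.hadoop.hbase.util.Bytes;

  public class IndexLookup {
    public static void main(String[] args) {
      byte[][] index = new byte[][] {
        Bytes.toBytes("apple"), Bytes.toBytes("mango"), Bytes.toBytes("zebra")
      };
      byte[] key = Bytes.toBytes("mango");
      int hit = Bytes.binarySearch(index, key, 0, key.length,
          Bytes.BYTES_RAWCOMPARATOR);
      System.out.println(hit);   // 1
      byte[] miss = Bytes.toBytes("pear");
      int where = Bytes.binarySearch(index, miss, 0, miss.length,
          Bytes.BYTES_RAWCOMPARATOR);
      System.out.println(where); // -3: would insert at index 2
    }
  }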
+
+  /**
+   * Bytewise binary increment/decrement of long contained in byte array
+   * on given amount.
+   *
+   * @param value - array of bytes containing long (length <= SIZEOF_LONG)
+   * @param amount value will be incremented on (decremented if negative)
+   * @return array of bytes containing incremented long (length == SIZEOF_LONG)
+   */
+  public static byte [] incrementBytes(byte[] value, long amount)
+  {
+    byte[] val = value;
+    if (val.length < SIZEOF_LONG) {
+      // Hopefully this doesn't happen too often.
+      byte [] newvalue;
+      if (val[0] < 0) {
+        newvalue = new byte[]{-1, -1, -1, -1, -1, -1, -1, -1};
+      } else {
+        newvalue = new byte[SIZEOF_LONG];
+      }
+      System.arraycopy(val, 0, newvalue, newvalue.length - val.length,
+          val.length);
+      val = newvalue;
+    } else if (val.length > SIZEOF_LONG) {
+      throw new IllegalArgumentException("Increment Bytes - value too big: " +
+          val.length);
+    }
+    if(amount == 0) return val;
+    if(val[0] < 0){
+      return binaryIncrementNeg(val, amount);
+    }
+    return binaryIncrementPos(val, amount);
+  }
+
+  /* increment/decrement for positive value */
+  private static byte [] binaryIncrementPos(byte [] value, long amount) {
+    long amo = amount;
+    int sign = 1;
+    if (amount < 0) {
+      amo = -amount;
+      sign = -1;
+    }
+    for(int i=0;i<value.length;i++) {
+      int cur = ((int)amo % 256) * sign;
+      amo = (amo >> 8);
+      int val = value[value.length-i-1] & 0x0ff;
+      int total = val + cur;
+      if(total > 255) {
+        amo += sign;
+        total %= 256;
+      } else if (total < 0) {
+        amo -= sign;
+      }
+      value[value.length-i-1] = (byte)total;
+      if (amo == 0) return value;
+    }
+    return value;
+  }
+
+  /* increment/decrement for negative value */
+  private static byte [] binaryIncrementNeg(byte [] value, long amount) {
+    long amo = amount;
+    int sign = 1;
+    if (amount < 0) {
+      amo = -amount;
+      sign = -1;
+    }
+    for(int i=0;i<value.length;i++) {
+      int cur = ((int)amo % 256) * sign;
+      amo = (amo >> 8);
+      int val = ((~value[value.length-i-1]) & 0x0ff) + 1;
+      int total = cur - val;
+      if(total >= 0) {
+        amo += sign;
+      } else if (total < -256) {
+        amo -= sign;
+        total %= 256;
+      }
+      value[value.length-i-1] = (byte)total;
+      if (amo == 0) return value;
+    }
+    return value;
+  }
+
+  /**
+   * Writes a string as a fixed-size field, padded with zeros.
+   */
+  public static void writeStringFixedSize(final DataOutput out, String s,
+      int size) throws IOException {
+    byte[] b = toBytes(s);
+    if (b.length > size) {
+      throw new IOException("Trying to write " + b.length + " bytes (" +
+          toStringBinary(b) + ") into a field of length " + size);
+    }
+
+    out.writeBytes(s);
+    for (int i = 0; i < size - s.length(); ++i)
+      out.writeByte(0);
+  }
+
+  /**
+   * Reads a fixed-size field and interprets it as a string padded with zeros.
+   */
+  public static String readStringFixedSize(final DataInput in, int size)
+      throws IOException {
+    byte[] b = new byte[size];
+    in.readFully(b);
+    int n = b.length;
+    while (n > 0 && b[n - 1] == 0)
+      --n;
+
+    return toString(b, 0, n);
+  }
+
+}
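incrementBytes treats the array as a big-endian two's-complement long, sign-extending shorter input to eight bytes before adding. A quick sketch, illustrative only; CounterBump is a made-up name:

  import org.apache.hadoop.hbase.util.Bytes;

  public class CounterBump {
    public static void main(String[] args) {
      byte[] counter = Bytes.toBytes(41L);
      byte[] bumped = Bytes.incrementBytes(counter, 1);
      System.out.println(Bytes.toLong(bumped)); // 42
    }
  }

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
new file mode 100644
index 0000000..4bee7b6
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
@@ -0,0 +1,118 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.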
+ */ + +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.LogFactory; +import java.io.PrintWriter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.VersionAnnotation; +import org.apache.commons.logging.Log; + +/** + * This class finds the package info for hbase and the VersionAnnotation + * information. Taken from hadoop. Only name of annotation is different. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class VersionInfo { + private static final Log LOG = LogFactory.getLog(VersionInfo.class.getName()); + private static Package myPackage; + private static VersionAnnotation version; + + static { + myPackage = VersionAnnotation.class.getPackage(); + version = myPackage.getAnnotation(VersionAnnotation.class); + } + + /** + * Get the meta-data for the hbase package. + * @return package + */ + static Package getPackage() { + return myPackage; + } + + /** + * Get the hbase version. + * @return the hbase version string, eg. "0.6.3-dev" + */ + public static String getVersion() { + return version != null ? version.version() : "Unknown"; + } + + /** + * Get the subversion revision number for the root directory + * @return the revision number, eg. "451451" + */ + public static String getRevision() { + return version != null ? version.revision() : "Unknown"; + } + + /** + * The date that hbase was compiled. + * @return the compilation date in unix date format + */ + public static String getDate() { + return version != null ? version.date() : "Unknown"; + } + + /** + * The user that compiled hbase. + * @return the username of the user + */ + public static String getUser() { + return version != null ? version.user() : "Unknown"; + } + + /** + * Get the subversion URL for the root hbase directory. + * @return the url + */ + public static String getUrl() { + return version != null ? version.url() : "Unknown"; + } + + static String[] versionReport() { + return new String[] { + "HBase " + getVersion(), + "Subversion " + getUrl() + " -r " + getRevision(), + "Compiled by " + getUser() + " on " + getDate() + }; + } + + public static void writeTo(PrintWriter out) { + for (String line : versionReport()) { + out.println(line); + } + } + + public static void logVersion() { + for (String line : versionReport()) { + LOG.info(line); + } + } + + public static void main(String[] args) { + logVersion(); + } +} diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index bd6aa91..bfef5e5 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -296,6 +296,11 @@ + + + org.apache.hbase + hbase-common + com.google.guava diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java deleted file mode 100644 index 19cca3c..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.util.Map.Entry; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.VersionInfo; - -/** - * Adds HBase configuration files to a Configuration - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HBaseConfiguration extends Configuration { - - private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class); - - // a constant to convert a fraction to a percentage - private static final int CONVERT_TO_PERCENTAGE = 100; - - /** - * Instantinating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create() to construct a plain Configuration - */ - @Deprecated - public HBaseConfiguration() { - //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration - super(); - addHbaseResources(this); - LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" + - " HBaseConfiguration#create() to construct a plain Configuration"); - } - - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create(conf) to construct a plain Configuration - */ - @Deprecated - public HBaseConfiguration(final Configuration c) { - //TODO:replace with private constructor - this(); - merge(this, c); - } - - private static void checkDefaultsVersion(Configuration conf) { - if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return; - String defaultsVersion = conf.get("hbase.defaults.for.version"); - String thisVersion = VersionInfo.getVersion(); - if (!thisVersion.equals(defaultsVersion)) { - throw new RuntimeException( - "hbase-default.xml file seems to be for and old version of HBase (" + - defaultsVersion + "), this version is " + thisVersion); - } - } - - private static void checkForClusterFreeMemoryLimit(Configuration conf) { - float globalMemstoreLimit = conf.getFloat("hbase.regionserver.global.memstore.upperLimit", 0.4f); - int gml = (int)(globalMemstoreLimit * CONVERT_TO_PERCENTAGE); - float blockCacheUpperLimit = - conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); - int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); - if (CONVERT_TO_PERCENTAGE - (gml + bcul) - < (int)(CONVERT_TO_PERCENTAGE * - HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { - throw new RuntimeException( - "Current heap configuration for MemStore and BlockCache exceeds " + - "the threshold required for successful cluster operation. " + - "The combined value cannot exceed 0.8. Please check " + - "the settings for hbase.regionserver.global.memstore.upperLimit and " + - "hfile.block.cache.size in your configuration. 
" + - "hbase.regionserver.global.memstore.upperLimit is " + - globalMemstoreLimit + - " hfile.block.cache.size is " + blockCacheUpperLimit); - } - } - - public static Configuration addHbaseResources(Configuration conf) { - conf.addResource("hbase-default.xml"); - conf.addResource("hbase-site.xml"); - - checkDefaultsVersion(conf); - checkForClusterFreeMemoryLimit(conf); - return conf; - } - - /** - * Creates a Configuration with HBase resources - * @return a Configuration with HBase resources - */ - public static Configuration create() { - Configuration conf = new Configuration(); - return addHbaseResources(conf); - } - - /** - * Creates a clone of passed configuration. - * @param that Configuration to clone. - * @return a clone of passed configuration. - */ - public static Configuration create(final Configuration that) { - return new Configuration(that); - } - - /** - * Merge two configurations. - * @param destConf the configuration that will be overwritten with items - * from the srcConf - * @param srcConf the source configuration - **/ - public static void merge(Configuration destConf, Configuration srcConf) { - for (Entry e : srcConf) { - destConf.set(e.getKey(), e.getValue()); - } - } - - /** - * - * @return whether to show HBase Configuration in servlet - */ - public static boolean isShowConfInServlet() { - boolean isShowConf = false; - try { - if (Class.forName("org.apache.hadoop.conf.ConfServlet") != null) { - isShowConf = true; - } - } catch (Exception e) { - - } - return isShowConf; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HConstants.java deleted file mode 100644 index 5f14680..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ /dev/null @@ -1,679 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.regex.Pattern; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * HConstants holds a bunch of HBase-related constants - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public final class HConstants { - /** - * Status codes used for return values of bulk operations. 
- */ - public enum OperationStatusCode { - NOT_RUN, - SUCCESS, - SANITY_CHECK_FAILURE, - FAILURE; - } - - /** long constant for zero */ - public static final Long ZERO_L = Long.valueOf(0L); - public static final String NINES = "99999999999999"; - public static final String ZEROES = "00000000000000"; - - // For migration - - /** name of version file */ - public static final String VERSION_FILE_NAME = "hbase.version"; - - /** - * Current version of file system. - * Version 4 supports only one kind of bloom filter. - * Version 5 changes versions in catalog table regions. - * Version 6 enables blockcaching on catalog tables. - * Version 7 introduces hfile -- hbase 0.19 to 0.20.. - */ - // public static final String FILE_SYSTEM_VERSION = "6"; - public static final String FILE_SYSTEM_VERSION = "7"; - - // Configuration parameters - - //TODO: Is having HBase homed on port 60k OK? - - /** Cluster is in distributed mode or not */ - public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed"; - - /** Config for pluggable load balancers */ - public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class"; - - /** Cluster is standalone or pseudo-distributed */ - public static final boolean CLUSTER_IS_LOCAL = false; - - /** Cluster is fully-distributed */ - public static final boolean CLUSTER_IS_DISTRIBUTED = true; - - /** Default value for cluster distributed mode */ - public static final boolean DEFAULT_CLUSTER_DISTRIBUTED = CLUSTER_IS_LOCAL; - - /** default host address */ - public static final String DEFAULT_HOST = "0.0.0.0"; - - /** Parameter name for port master listens on. */ - public static final String MASTER_PORT = "hbase.master.port"; - - /** default port that the master listens on */ - public static final int DEFAULT_MASTER_PORT = 60000; - - /** default port for master web api */ - public static final int DEFAULT_MASTER_INFOPORT = 60010; - - /** Configuration key for master web API port */ - public static final String MASTER_INFO_PORT = "hbase.master.info.port"; - - /** Parameter name for the master type being backup (waits for primary to go inactive). */ - public static final String MASTER_TYPE_BACKUP = "hbase.master.backup"; - - /** by default every master is a possible primary master unless the conf explicitly overrides it */ - public static final boolean DEFAULT_MASTER_TYPE_BACKUP = false; - - /** Parameter name for ZooKeeper session time out.*/ - public static final String ZOOKEEPER_SESSION_TIMEOUT = - "zookeeper.session.timeout"; - - /** Name of ZooKeeper quorum configuration parameter. */ - public static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum"; - - /** Name of ZooKeeper config file in conf/ directory. */ - public static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg"; - - /** Common prefix of ZooKeeper configuration properties */ - public static final String ZK_CFG_PROPERTY_PREFIX = - "hbase.zookeeper.property."; - - public static final int ZK_CFG_PROPERTY_PREFIX_LEN = - ZK_CFG_PROPERTY_PREFIX.length(); - - /** - * The ZK client port key in the ZK properties map. The name reflects the - * fact that this is not an HBase configuration key. 
- */ - public static final String CLIENT_PORT_STR = "clientPort"; - - /** Parameter name for the client port that the zookeeper listens on */ - public static final String ZOOKEEPER_CLIENT_PORT = - ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR; - - /** Default client port that the zookeeper listens on */ - public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181; - - /** Parameter name for the wait time for the recoverable zookeeper */ - public static final String ZOOKEEPER_RECOVERABLE_WAITTIME = "hbase.zookeeper.recoverable.waittime"; - - /** Default wait time for the recoverable zookeeper */ - public static final long DEFAULT_ZOOKEPER_RECOVERABLE_WAITIME = 10000; - - /** Parameter name for the root dir in ZK for this cluster */ - public static final String ZOOKEEPER_ZNODE_PARENT = "zookeeper.znode.parent"; - - public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase"; - - /** - * Parameter name for the limit on concurrent client-side zookeeper - * connections - */ - public static final String ZOOKEEPER_MAX_CLIENT_CNXNS = - ZK_CFG_PROPERTY_PREFIX + "maxClientCnxns"; - - /** Parameter name for the ZK data directory */ - public static final String ZOOKEEPER_DATA_DIR = - ZK_CFG_PROPERTY_PREFIX + "dataDir"; - - /** Default limit on concurrent client-side zookeeper connections */ - public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 300; - - /** Configuration key for ZooKeeper session timeout */ - public static final String ZK_SESSION_TIMEOUT = "zookeeper.session.timeout"; - - /** Default value for ZooKeeper session timeout */ - public static final int DEFAULT_ZK_SESSION_TIMEOUT = 180 * 1000; - - /** Parameter name for port region server listens on. */ - public static final String REGIONSERVER_PORT = "hbase.regionserver.port"; - - /** Default port region server listens on. */ - public static final int DEFAULT_REGIONSERVER_PORT = 60020; - - /** default port for region server web api */ - public static final int DEFAULT_REGIONSERVER_INFOPORT = 60030; - - /** A configuration key for regionserver info port */ - public static final String REGIONSERVER_INFO_PORT = - "hbase.regionserver.info.port"; - - /** A flag that enables automatic selection of regionserver info port */ - public static final String REGIONSERVER_INFO_PORT_AUTO = - REGIONSERVER_INFO_PORT + ".auto"; - - /** Parameter name for what region server implementation to use. */ - public static final String REGION_SERVER_IMPL= "hbase.regionserver.impl"; - - /** Parameter name for what master implementation to use. 
*/ - public static final String MASTER_IMPL= "hbase.master.impl"; - - /** Parameter name for how often threads should wake up */ - public static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency"; - - /** Default value for thread wake frequency */ - public static final int DEFAULT_THREAD_WAKE_FREQUENCY = 10 * 1000; - - /** Parameter name for how often we should try to write a version file, before failing */ - public static final String VERSION_FILE_WRITE_ATTEMPTS = "hbase.server.versionfile.writeattempts"; - - /** Parameter name for how often we should try to write a version file, before failing */ - public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3; - - /** Parameter name for how often a region should should perform a major compaction */ - public static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction"; - - /** Parameter name for HBase instance root directory */ - public static final String HBASE_DIR = "hbase.rootdir"; - - /** Parameter name for HBase client IPC pool type */ - public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type"; - - /** Parameter name for HBase client IPC pool size */ - public static final String HBASE_CLIENT_IPC_POOL_SIZE = "hbase.client.ipc.pool.size"; - - /** Parameter name for HBase client operation timeout, which overrides RPC timeout */ - public static final String HBASE_CLIENT_OPERATION_TIMEOUT = "hbase.client.operation.timeout"; - - /** Default HBase client operation timeout, which is tantamount to a blocking call */ - public static final int DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = Integer.MAX_VALUE; - - /** Used to construct the name of the log directory for a region server - * Use '.' as a special character to seperate the log files from table data */ - public static final String HREGION_LOGDIR_NAME = ".logs"; - - /** Used to construct the name of the splitlog directory for a region server */ - public static final String SPLIT_LOGDIR_NAME = "splitlog"; - - public static final String CORRUPT_DIR_NAME = ".corrupt"; - - /** Like the previous, but for old logs that are about to be deleted */ - public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs"; - - /** Used to construct the name of the compaction directory during compaction */ - public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir"; - - /** Conf key for the max file size after which we split the region */ - public static final String HREGION_MAX_FILESIZE = - "hbase.hregion.max.filesize"; - - /** Default maximum file size */ - public static final long DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L; - - /** - * The max number of threads used for opening and closing stores or store - * files in parallel - */ - public static final String HSTORE_OPEN_AND_CLOSE_THREADS_MAX = - "hbase.hstore.open.and.close.threads.max"; - - /** - * The default number for the max number of threads used for opening and - * closing stores or store files in parallel - */ - public static final int DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX = 1; - - - /** Conf key for the memstore size at which we flush the memstore */ - public static final String HREGION_MEMSTORE_FLUSH_SIZE = - "hbase.hregion.memstore.flush.size"; - - /** Default size of a reservation block */ - public static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5; - - /** Maximum value length, enforced on KeyValue construction */ - public static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE; - - /** name of the file for unique cluster ID */ - 
public static final String CLUSTER_ID_FILE_NAME = "hbase.id"; - - /** Configuration key storing the cluster ID */ - public static final String CLUSTER_ID = "hbase.cluster.id"; - - // Always store the location of the root table's HRegion. - // This HRegion is never split. - - // region name = table + startkey + regionid. This is the row key. - // each row in the root and meta tables describes exactly 1 region - // Do we ever need to know all the information that we are storing? - - // Note that the name of the root table starts with "-" and the name of the - // meta table starts with "." Why? it's a trick. It turns out that when we - // store region names in memory, we use a SortedMap. Since "-" sorts before - // "." (and since no other table name can start with either of these - // characters, the root region will always be the first entry in such a Map, - // followed by all the meta regions (which will be ordered by their starting - // row key as well), followed by all user tables. So when the Master is - // choosing regions to assign, it will always choose the root region first, - // followed by the meta regions, followed by user regions. Since the root - // and meta regions always need to be on-line, this ensures that they will - // be the first to be reassigned if the server(s) they are being served by - // should go down. - - /** The root table's name.*/ - public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-"); - - /** The META table's name. */ - public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); - - /** delimiter used between portions of a region name */ - public static final int META_ROW_DELIMITER = ','; - - /** The catalog family as a string*/ - public static final String CATALOG_FAMILY_STR = "info"; - - /** The catalog family */ - public static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR); - - /** The RegionInfo qualifier as a string */ - public static final String REGIONINFO_QUALIFIER_STR = "regioninfo"; - - /** The regioninfo column qualifier */ - public static final byte [] REGIONINFO_QUALIFIER = - Bytes.toBytes(REGIONINFO_QUALIFIER_STR); - - /** The server column qualifier */ - public static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server"); - - /** The startcode column qualifier */ - public static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode"); - - /** The lower-half split region column qualifier */ - public static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA"); - - /** The upper-half split region column qualifier */ - public static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB"); - - /** - * The meta table version column qualifier. - * We keep current version of the meta table in this column in -ROOT- - * table: i.e. in the 'info:v' column. - */ - public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v"); - - /** - * The current version of the meta table. - * Before this the meta had HTableDescriptor serialized into the HRegionInfo; - * i.e. pre-hbase 0.92. There was no META_VERSION column in the root table - * in this case. The presence of a version and its value being zero indicates - * meta is up-to-date. - */ - public static final short META_VERSION = 0; - - // Other constants - - /** - * An empty instance. 
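The sort-order trick described in the comment above is easy to verify directly: '-' (0x2D) precedes '.' (0x2E), which precedes the alphanumerics user table names start with. A small demonstration using the Bytes utility this patch relocates (BYTES_COMPARATOR is declared as a raw Comparator in this revision, so the TreeMap construction compiles with an unchecked warning):

import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class CatalogSortExample {
  public static void main(String[] args) {
    TreeMap<byte[], String> regions =
        new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    regions.put(Bytes.toBytes("usertable"), "user region");
    regions.put(Bytes.toBytes(".META."), "meta region");
    regions.put(Bytes.toBytes("-ROOT-"), "root region");
    // Iterates -ROOT-, then .META., then usertable.
    for (byte[] name : regions.keySet()) {
      System.out.println(Bytes.toString(name));
    }
  }
}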
- */ - public static final byte [] EMPTY_BYTE_ARRAY = new byte [0]; - - /** - * Used by scanners, etc when they want to start at the beginning of a region - */ - public static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY; - - /** - * Last row in a table. - */ - public static final byte [] EMPTY_END_ROW = EMPTY_START_ROW; - - /** - * Used by scanners and others when they're trying to detect the end of a - * table - */ - public static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY; - - /** - * Max length a row can have because of the limitation in TFile. - */ - public static final int MAX_ROW_LENGTH = Short.MAX_VALUE; - - /** When we encode strings, we always specify UTF8 encoding */ - public static final String UTF8_ENCODING = "UTF-8"; - - /** - * Timestamp to use when we want to refer to the latest cell. - * This is the timestamp sent by clients when no timestamp is specified on - * commit. - */ - public static final long LATEST_TIMESTAMP = Long.MAX_VALUE; - - /** - * Timestamp to use when we want to refer to the oldest cell. - */ - public static final long OLDEST_TIMESTAMP = Long.MIN_VALUE; - - /** - * LATEST_TIMESTAMP in bytes form - */ - public static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP); - - /** - * Define for 'return-all-versions'. - */ - public static final int ALL_VERSIONS = Integer.MAX_VALUE; - - /** - * Unlimited time-to-live. - */ -// public static final int FOREVER = -1; - public static final int FOREVER = Integer.MAX_VALUE; - - /** - * Seconds in a week - */ - public static final int WEEK_IN_SECONDS = 7 * 24 * 3600; - - //TODO: although the following are referenced widely to format strings for - // the shell. They really aren't a part of the public API. It would be - // nice if we could put them somewhere where they did not need to be - // public. They could have package visibility - public static final String NAME = "NAME"; - public static final String VERSIONS = "VERSIONS"; - public static final String IN_MEMORY = "IN_MEMORY"; - public static final String CONFIG = "CONFIG"; - - /** - * This is a retry backoff multiplier table similar to the BSD TCP syn - * backoff table, a bit more aggressive than simple exponential backoff. - */ - public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 }; - - public static final String REGION_IMPL = "hbase.hregion.impl"; - - /** modifyTable op for replacing the table descriptor */ - public static enum Modify { - CLOSE_REGION, - TABLE_COMPACT, - TABLE_FLUSH, - TABLE_MAJOR_COMPACT, - TABLE_SET_HTD, - TABLE_SPLIT - } - - /** - * Scope tag for locally scoped data. - * This data will not be replicated. - */ - public static final int REPLICATION_SCOPE_LOCAL = 0; - - /** - * Scope tag for globally scoped data. - * This data will be replicated to all peers. - */ - public static final int REPLICATION_SCOPE_GLOBAL = 1; - - /** - * Default cluster ID, cannot be used to identify a cluster so a key with - * this value means it wasn't meant for replication. - */ - public static final UUID DEFAULT_CLUSTER_ID = new UUID(0L,0L); - - /** - * Parameter name for maximum number of bytes returned when calling a - * scanner's next method. - */ - public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size"; - - /** - * Maximum number of bytes returned when calling a scanner's next method. - * Note that when a single row is larger than this limit the row is still - * returned completely. - * - * The default value is unlimited. 
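RETRY_BACKOFF above is a multiplier table rather than a schedule of absolute waits; a retrying client multiplies its configured pause by the entry for the current attempt, clamping at the last entry. A sketch under that assumption (getPauseTime is a hypothetical helper, and 1000 ms stands in for the client pause key defined further down):

import org.apache.hadoop.hbase.HConstants;

public class BackoffExample {
  // Hypothetical helper: pause * RETRY_BACKOFF[tries], clamped to the
  // table's last entry so late retries keep the maximum multiplier.
  static long getPauseTime(long pauseMs, int tries) {
    int index = Math.min(tries, HConstants.RETRY_BACKOFF.length - 1);
    return pauseMs * HConstants.RETRY_BACKOFF[index];
  }

  public static void main(String[] args) {
    for (int t = 0; t < 12; t++) {
      System.out.println("attempt " + t + " -> " + getPauseTime(1000, t) + " ms");
    }
  }
}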
- */ - public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE; - - /** - * Parameter name for client pause value, used mostly as value to wait - * before running a retry of a failed get, region lookup, etc. - */ - public static String HBASE_CLIENT_PAUSE = "hbase.client.pause"; - - /** - * Default value of {@link #HBASE_CLIENT_PAUSE}. - */ - public static long DEFAULT_HBASE_CLIENT_PAUSE = 1000; - - /** - * Parameter name for maximum retries, used as maximum for all retryable - * operations such as fetching of the root region from root region server, - * getting a cell's value, starting a row update, etc. - */ - public static String HBASE_CLIENT_RETRIES_NUMBER = "hbase.client.retries.number"; - - /** - * Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}. - */ - public static int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10; - - /** - * Parameter name for maximum attempts, used to limit the number of times the - * client will try to obtain the proxy for a given region server. - */ - public static String HBASE_CLIENT_RPC_MAXATTEMPTS = "hbase.client.rpc.maxattempts"; - - /** - * Default value of {@link #HBASE_CLIENT_RPC_MAXATTEMPTS}. - */ - public static int DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS = 1; - - /** - * Parameter name for client prefetch limit, used as the maximum number of regions - * info that will be prefetched. - */ - public static String HBASE_CLIENT_PREFETCH_LIMIT = "hbase.client.prefetch.limit"; - - /** - * Default value of {@link #HBASE_CLIENT_PREFETCH_LIMIT}. - */ - public static int DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT = 10; - - /** - * Parameter name for number of rows that will be fetched when calling next on - * a scanner if it is not served from memory. Higher caching values will - * enable faster scanners but will eat up more memory and some calls of next - * may take longer and longer times when the cache is empty. - */ - public static String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching"; - - /** - * Default value of {@link #HBASE_META_SCANNER_CACHING}. - */ - public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100; - - /** - * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration} - * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that, - * for all intents and purposes, are the same except for their instance ids, - * then they will not be able to share the same {@link org.apache.hadoop.hbase.client.HConnection} instance. - * On the other hand, even if the instance ids are the same, it could result - * in non-shared {@link org.apache.hadoop.hbase.client.HConnection} - * instances if some of the other connection parameters differ. - */ - public static String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id"; - - /** - * HRegion server lease period in milliseconds. Clients must report in within this period - * else they are considered dead. Unit measured in ms (milliseconds). - */ - public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY = - "hbase.regionserver.lease.period"; - - /** - * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}. - */ - public static long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000; - - /** - * timeout for each RPC - */ - public static String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout"; - - /** - * Default value of {@link #HBASE_RPC_TIMEOUT_KEY} - */ - public static int DEFAULT_HBASE_RPC_TIMEOUT = 60000; - - /* - * cluster replication constants. 
- */ - public static final String - REPLICATION_ENABLE_KEY = "hbase.replication"; - public static final String - REPLICATION_SOURCE_SERVICE_CLASSNAME = "hbase.replication.source.service"; - public static final String - REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service"; - public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT = - "org.apache.hadoop.hbase.replication.regionserver.Replication"; - - /** HBCK special code name used as server name when manipulating ZK nodes */ - public static final String HBCK_CODE_NAME = "HBCKServerName"; - - public static final ServerName HBCK_CODE_SERVERNAME = - new ServerName(HBCK_CODE_NAME, -1, -1L); - - public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER = - "hbase.regionserver.hostname.seen.by.master"; - - public static final String HBASE_MASTER_LOGCLEANER_PLUGINS = - "hbase.master.logcleaner.plugins"; - - public static final String HBASE_REGION_SPLIT_POLICY_KEY = - "hbase.regionserver.region.split.policy"; - - /** - * Configuration key for the size of the block cache - */ - public static final String HFILE_BLOCK_CACHE_SIZE_KEY = - "hfile.block.cache.size"; - - public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.25f; - - /* - * Minimum percentage of free heap necessary for a successful cluster startup. - */ - public static final float HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f; - - public static final List HBASE_NON_USER_TABLE_DIRS = new ArrayList( - Arrays.asList(new String[]{ HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME, - CORRUPT_DIR_NAME, Bytes.toString(META_TABLE_NAME), - Bytes.toString(ROOT_TABLE_NAME), SPLIT_LOGDIR_NAME })); - - public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile - ("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); - public static final Pattern CP_HTD_ATTR_VALUE_PATTERN = - Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$"); - - public static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; - public static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; - public static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( - "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + - CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); - - /** The delay when re-trying a socket operation in a loop (HBASE-4712) */ - public static final int SOCKET_RETRY_WAIT_MS = 200; - - /** Host name of the local machine */ - public static final String LOCALHOST = "localhost"; - - /** - * If this parameter is set to true, then hbase will read - * data and then verify checksums. Checksum verification - * inside hdfs will be switched off. However, if the hbase-checksum - * verification fails, then it will switch back to using - * hdfs checksums for verifiying data that is being read from storage. - * - * If this parameter is set to false, then hbase will not - * verify any checksums, instead it will depend on checksum verification - * being done in the hdfs client. - */ - public static final String HBASE_CHECKSUM_VERIFICATION = - "hbase.regionserver.checksum.verify"; - - public static final String LOCALHOST_IP = "127.0.0.1"; - - /** Conf key that enables distributed log splitting */ - public static final String DISTRIBUTED_LOG_SPLITTING_KEY = - "hbase.master.distributed.log.splitting"; - - /** - * The name of the configuration parameter that specifies - * the number of bytes in a newly created checksum chunk. 
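The coprocessor attribute patterns above split a table-descriptor value of the form path|class|priority|key=value,... into its parts. A quick demonstration against a made-up spec string (the jar path and observer class name are illustrative only):

import java.util.regex.Matcher;
import org.apache.hadoop.hbase.HConstants;

public class CoprocessorSpecExample {
  public static void main(String[] args) {
    String spec = "hdfs:///cp/demo.jar|com.example.DemoObserver|1001|arg1=v1";
    Matcher m = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (m.matches()) {
      System.out.println("path     = " + m.group(1)); // hdfs:///cp/demo.jar
      System.out.println("class    = " + m.group(2)); // com.example.DemoObserver
      System.out.println("priority = " + m.group(3)); // 1001
    }
  }
}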
- */ - public static final String BYTES_PER_CHECKSUM = - "hbase.hstore.bytes.per.checksum"; - - /** - * The name of the configuration parameter that specifies - * the name of an algorithm that is used to compute checksums - * for newly created blocks. - */ - public static final String CHECKSUM_TYPE_NAME = - "hbase.hstore.checksum.algorithm"; - - /** Enable file permission modification from standard hbase */ - public static final String ENABLE_DATA_FILE_UMASK = "hbase.data.umask.enable"; - /** File permission umask to use when creating hbase data files */ - public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask"; - - /** Configuration name of HLog Compression */ - public static final String ENABLE_WAL_COMPRESSION = - "hbase.regionserver.wal.enablecompression"; - -/** Region in Transition metrics threshold time */ - public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD="hbase.metrics.rit.stuck.warning.threshold"; - - public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop"; - - /** - * The byte array represents for NO_NEXT_INDEXED_KEY; - * The actual value is irrelevant because this is always compared by reference. - */ - public static final byte [] NO_NEXT_INDEXED_KEY = Bytes.toBytes("NO_NEXT_INDEXED_KEY"); - - private HConstants() { - // Can't be instantiated with this ctor. - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java deleted file mode 100644 index c9efee1..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.lang.annotation.*; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * A package attribute that captures the version of hbase that was compiled. - * Copied down from hadoop. All is same except name of interface. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.PACKAGE) -@InterfaceAudience.Private -public @interface VersionAnnotation { - - /** - * Get the Hadoop version - * @return the version string "0.6.3-dev" - */ - String version(); - - /** - * Get the username that compiled Hadoop. - */ - String user(); - - /** - * Get the date when Hadoop was compiled. - * @return the date in unix 'date' format - */ - String date(); - - /** - * Get the url for the subversion repository. - */ - String url(); - - /** - * Get the subversion revision. - * @return the revision number as a string (eg. 
"451451") - */ - String revision(); -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java deleted file mode 100644 index 926f12d..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Copyright 2009 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io; - -import java.io.IOException; -import java.io.DataInput; -import java.io.DataOutput; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.WritableComparator; - -/** - * A byte sequence that is usable as a key or value. Based on - * {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable - * and DOES NOT distinguish between the size of the seqeunce and the current - * capacity as {@link org.apache.hadoop.io.BytesWritable} does. Hence its - * comparatively 'immutable'. When creating a new instance of this class, - * the underlying byte [] is not copied, just referenced. The backing - * buffer is accessed when we go to serialize. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ImmutableBytesWritable -implements WritableComparable { - private byte[] bytes; - private int offset; - private int length; - - /** - * Create a zero-size sequence. - */ - public ImmutableBytesWritable() { - super(); - } - - /** - * Create a ImmutableBytesWritable using the byte array as the initial value. - * @param bytes This array becomes the backing storage for the object. - */ - public ImmutableBytesWritable(byte[] bytes) { - this(bytes, 0, bytes.length); - } - - /** - * Set the new ImmutableBytesWritable to the contents of the passed - * ibw. - * @param ibw the value to set this ImmutableBytesWritable to. - */ - public ImmutableBytesWritable(final ImmutableBytesWritable ibw) { - this(ibw.get(), 0, ibw.getSize()); - } - - /** - * Set the value to a given byte range - * @param bytes the new byte range to set to - * @param offset the offset in newData to start at - * @param length the number of bytes in the range - */ - public ImmutableBytesWritable(final byte[] bytes, final int offset, - final int length) { - this.bytes = bytes; - this.offset = offset; - this.length = length; - } - - /** - * Get the data from the BytesWritable. - * @return The data is only valid between offset and offset+length. 
- */ - public byte [] get() { - if (this.bytes == null) { - throw new IllegalStateException("Uninitialiized. Null constructor " + - "called w/o accompaying readFields invocation"); - } - return this.bytes; - } - - /** - * @param b Use passed bytes as backing array for this instance. - */ - public void set(final byte [] b) { - set(b, 0, b.length); - } - - /** - * @param b Use passed bytes as backing array for this instance. - * @param offset - * @param length - */ - public void set(final byte [] b, final int offset, final int length) { - this.bytes = b; - this.offset = offset; - this.length = length; - } - - /** - * @return the number of valid bytes in the buffer - */ - public int getSize() { - if (this.bytes == null) { - throw new IllegalStateException("Uninitialiized. Null constructor " + - "called w/o accompaying readFields invocation"); - } - return this.length; - } - - /** - * @return the number of valid bytes in the buffer - */ - //Should probably deprecate getSize() so that we keep the same calls for all - //byte [] - public int getLength() { - if (this.bytes == null) { - throw new IllegalStateException("Uninitialiized. Null constructor " + - "called w/o accompaying readFields invocation"); - } - return this.length; - } - - /** - * @return offset - */ - public int getOffset(){ - return this.offset; - } - - public void readFields(final DataInput in) throws IOException { - this.length = in.readInt(); - this.bytes = new byte[this.length]; - in.readFully(this.bytes, 0, this.length); - this.offset = 0; - } - - public void write(final DataOutput out) throws IOException { - out.writeInt(this.length); - out.write(this.bytes, this.offset, this.length); - } - - // Below methods copied from BytesWritable - @Override - public int hashCode() { - int hash = 1; - for (int i = offset; i < offset + length; i++) - hash = (31 * hash) + (int)bytes[i]; - return hash; - } - - /** - * Define the sort order of the BytesWritable. - * @param that The other bytes writable - * @return Positive if left is bigger than right, 0 if they are equal, and - * negative if left is smaller than right. - */ - public int compareTo(ImmutableBytesWritable that) { - return WritableComparator.compareBytes( - this.bytes, this.offset, this.length, - that.bytes, that.offset, that.length); - } - - /** - * Compares the bytes in this object to the specified byte array - * @param that - * @return Positive if left is bigger than right, 0 if they are equal, and - * negative if left is smaller than right. - */ - public int compareTo(final byte [] that) { - return WritableComparator.compareBytes( - this.bytes, this.offset, this.length, - that, 0, that.length); - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object right_obj) { - if (right_obj instanceof byte []) { - return compareTo((byte [])right_obj) == 0; - } - if (right_obj instanceof ImmutableBytesWritable) { - return compareTo((ImmutableBytesWritable)right_obj) == 0; - } - return false; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - StringBuilder sb = new StringBuilder(3*this.bytes.length); - for (int idx = offset; idx < offset + length; idx++) { - // if not the first, put a blank separator in - if (idx != offset) { - sb.append(' '); - } - String num = Integer.toHexString(bytes[idx]); - // if it is only one digit, add a leading 0. 
- if (num.length() < 2) { - sb.append('0'); - } - sb.append(num); - } - return sb.toString(); - } - - /** A Comparator optimized for ImmutableBytesWritable. - */ - public static class Comparator extends WritableComparator { - private BytesWritable.Comparator comparator = - new BytesWritable.Comparator(); - - /** constructor */ - public Comparator() { - super(ImmutableBytesWritable.class); - } - - /** - * @see org.apache.hadoop.io.WritableComparator#compare(byte[], int, int, byte[], int, int) - */ - @Override - public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - return comparator.compare(b1, s1, l1, b2, s2, l2); - } - } - - static { // register this comparator - WritableComparator.define(ImmutableBytesWritable.class, new Comparator()); - } - - /** - * @param array List of byte []. - * @return Array of byte []. - */ - public static byte [][] toArray(final List array) { - // List#toArray doesn't work on lists of byte []. - byte[][] results = new byte[array.size()][]; - for (int i = 0; i < array.size(); i++) { - results[i] = array.get(i); - } - return results; - } - - /** - * Returns a copy of the bytes referred to by this writable - */ - public byte[] copyBytes() { - return Arrays.copyOfRange(bytes, offset, offset+length); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 9f78505..22385e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -105,6 +105,9 @@ public class AssignmentManager extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(AssignmentManager.class); + public static final ServerName HBCK_CODE_SERVERNAME = new ServerName(HConstants.HBCK_CODE_NAME, + -1, -1L); + protected Server master; private ServerManager serverManager; @@ -709,7 +712,7 @@ public class AssignmentManager extends ZooKeeperListener { return; } // Check if this is a special HBCK transition - if (sn.equals(HConstants.HBCK_CODE_SERVERNAME)) { + if (sn.equals(HBCK_CODE_SERVERNAME)) { handleHBCK(rt); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Bytes.java deleted file mode 100644 index 33ecb2e..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ /dev/null @@ -1,1661 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
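The ImmutableBytesWritable removed above (presumably re-added under hbase-common elsewhere in this patch) serializes as a 4-byte length followed by the valid range of the buffer. A round trip through its Writable methods:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class IbwSerdeExample {
  public static void main(String[] args) throws IOException {
    ImmutableBytesWritable src = new ImmutableBytesWritable(Bytes.toBytes("abc"));
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    src.write(new DataOutputStream(buf)); // writeInt(length), then the bytes

    ImmutableBytesWritable dst = new ImmutableBytesWritable();
    dst.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    System.out.println(Bytes.toString(dst.get())); // abc
  }
}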
- */ -package org.apache.hadoop.hbase.util; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.lang.reflect.Field; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Comparator; -import java.util.Iterator; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.io.RawComparator; -import org.apache.hadoop.io.WritableComparator; -import org.apache.hadoop.io.WritableUtils; - -import sun.misc.Unsafe; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Utility class that handles byte arrays, conversions to/from other types, - * comparisons, hash code generation, manufacturing keys for HashMaps or - * HashSets, etc. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Bytes { - - private static final Log LOG = LogFactory.getLog(Bytes.class); - - /** - * Size of boolean in bytes - */ - public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE; - - /** - * Size of byte in bytes - */ - public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; - - /** - * Size of char in bytes - */ - public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE; - - /** - * Size of double in bytes - */ - public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE; - - /** - * Size of float in bytes - */ - public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE; - - /** - * Size of int in bytes - */ - public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; - - /** - * Size of long in bytes - */ - public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE; - - /** - * Size of short in bytes - */ - public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE; - - - /** - * Estimate of size cost to pay beyond payload in jvm for instance of byte []. - * Estimate based on study of jhat and jprofiler numbers. - */ - // JHat says BU is 56 bytes. - // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?) - public static final int ESTIMATED_HEAP_TAX = 16; - - /** - * Byte array comparator class. - */ - public static class ByteArrayComparator implements RawComparator { - /** - * Constructor - */ - public ByteArrayComparator() { - super(); - } - public int compare(byte [] left, byte [] right) { - return compareTo(left, right); - } - public int compare(byte [] b1, int s1, int l1, byte [] b2, int s2, int l2) { - return LexicographicalComparerHolder.BEST_COMPARER. - compareTo(b1, s1, l1, b2, s2, l2); - } - } - - /** - * Pass this to TreeMaps where byte [] are keys. - */ - public static Comparator BYTES_COMPARATOR = - new ByteArrayComparator(); - - /** - * Use comparing byte arrays, byte-by-byte - */ - public static RawComparator BYTES_RAWCOMPARATOR = - new ByteArrayComparator(); - - /** - * Read byte-array written with a WritableableUtils.vint prefix. - * @param in Input to read from. 
- * @return byte array read off in - * @throws IOException e - */ - public static byte [] readByteArray(final DataInput in) - throws IOException { - int len = WritableUtils.readVInt(in); - if (len < 0) { - throw new NegativeArraySizeException(Integer.toString(len)); - } - byte [] result = new byte[len]; - in.readFully(result, 0, len); - return result; - } - - /** - * Read byte-array written with a WritableableUtils.vint prefix. - * IOException is converted to a RuntimeException. - * @param in Input to read from. - * @return byte array read off in - */ - public static byte [] readByteArrayThrowsRuntime(final DataInput in) { - try { - return readByteArray(in); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - /** - * Write byte-array with a WritableableUtils.vint prefix. - * @param out output stream to be written to - * @param b array to write - * @throws IOException e - */ - public static void writeByteArray(final DataOutput out, final byte [] b) - throws IOException { - if(b == null) { - WritableUtils.writeVInt(out, 0); - } else { - writeByteArray(out, b, 0, b.length); - } - } - - /** - * Write byte-array to out with a vint length prefix. - * @param out output stream - * @param b array - * @param offset offset into array - * @param length length past offset - * @throws IOException e - */ - public static void writeByteArray(final DataOutput out, final byte [] b, - final int offset, final int length) - throws IOException { - WritableUtils.writeVInt(out, length); - out.write(b, offset, length); - } - - /** - * Write byte-array from src to tgt with a vint length prefix. - * @param tgt target array - * @param tgtOffset offset into target array - * @param src source array - * @param srcOffset source offset - * @param srcLength source length - * @return New offset in src array. - */ - public static int writeByteArray(final byte [] tgt, final int tgtOffset, - final byte [] src, final int srcOffset, final int srcLength) { - byte [] vint = vintToBytes(srcLength); - System.arraycopy(vint, 0, tgt, tgtOffset, vint.length); - int offset = tgtOffset + vint.length; - System.arraycopy(src, srcOffset, tgt, offset, srcLength); - return offset + srcLength; - } - - /** - * Put bytes at the specified byte array position. - * @param tgtBytes the byte array - * @param tgtOffset position in the array - * @param srcBytes array to write out - * @param srcOffset source offset - * @param srcLength source length - * @return incremented offset - */ - public static int putBytes(byte[] tgtBytes, int tgtOffset, byte[] srcBytes, - int srcOffset, int srcLength) { - System.arraycopy(srcBytes, srcOffset, tgtBytes, tgtOffset, srcLength); - return tgtOffset + srcLength; - } - - /** - * Write a single byte out to the specified byte array position. - * @param bytes the byte array - * @param offset position in the array - * @param b byte to write out - * @return incremented offset - */ - public static int putByte(byte[] bytes, int offset, byte b) { - bytes[offset] = b; - return offset + 1; - } - - /** - * Returns a new byte array, copied from the passed ByteBuffer. - * @param bb A ByteBuffer - * @return the byte array - */ - public static byte[] toBytes(ByteBuffer bb) { - int length = bb.limit(); - byte [] result = new byte[length]; - System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length); - return result; - } - - /** - * @param b Presumed UTF-8 encoded byte array. 
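The vint-prefixed readByteArray/writeByteArray pair above round-trips arbitrary payloads through a DataOutput/DataInput. For instance:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;

public class VintPrefixExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    Bytes.writeByteArray(new DataOutputStream(buf), Bytes.toBytes("payload"));

    byte[] back = Bytes.readByteArray(
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    System.out.println(Bytes.toString(back)); // payload
  }
}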
- * @return String made from b - */ - public static String toString(final byte [] b) { - if (b == null) { - return null; - } - return toString(b, 0, b.length); - } - - /** - * Joins two byte arrays together using a separator. - * @param b1 The first byte array. - * @param sep The separator to use. - * @param b2 The second byte array. - */ - public static String toString(final byte [] b1, - String sep, - final byte [] b2) { - return toString(b1, 0, b1.length) + sep + toString(b2, 0, b2.length); - } - - /** - * This method will convert utf8 encoded bytes into a string. If - * an UnsupportedEncodingException occurs, this method will eat it - * and return null instead. - * - * @param b Presumed UTF-8 encoded byte array. - * @param off offset into array - * @param len length of utf-8 sequence - * @return String made from b or null - */ - public static String toString(final byte [] b, int off, int len) { - if (b == null) { - return null; - } - if (len == 0) { - return ""; - } - try { - return new String(b, off, len, HConstants.UTF8_ENCODING); - } catch (UnsupportedEncodingException e) { - LOG.error("UTF-8 not supported?", e); - return null; - } - } - - /** - * Write a printable representation of a byte array. - * - * @param b byte array - * @return string - * @see #toStringBinary(byte[], int, int) - */ - public static String toStringBinary(final byte [] b) { - if (b == null) - return "null"; - return toStringBinary(b, 0, b.length); - } - - /** - * Converts the given byte buffer, from its array offset to its limit, to - * a string. The position and the mark are ignored. - * - * @param buf a byte buffer - * @return a string representation of the buffer's binary contents - */ - public static String toStringBinary(ByteBuffer buf) { - if (buf == null) - return "null"; - return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit()); - } - - /** - * Write a printable representation of a byte array. Non-printable - * characters are hex escaped in the format \\x%02X, eg: - * \x00 \x05 etc - * - * @param b array to write out - * @param off offset to start at - * @param len length to write - * @return string output - */ - public static String toStringBinary(final byte [] b, int off, int len) { - StringBuilder result = new StringBuilder(); - // Just in case we are passed a 'len' that is > buffer length... - if (off >= b.length) return result.toString(); - if (off + len > b.length) len = b.length - off; - try { - String first = new String(b, off, len, "ISO-8859-1"); - for (int i = 0; i < first.length() ; ++i ) { - int ch = first.charAt(i) & 0xFF; - if ( (ch >= '0' && ch <= '9') - || (ch >= 'A' && ch <= 'Z') - || (ch >= 'a' && ch <= 'z') - || " `~!@#$%^&*()-_=+[]{}\\|;:'\",.<>/?".indexOf(ch) >= 0 ) { - result.append(first.charAt(i)); - } else { - result.append(String.format("\\x%02X", ch)); - } - } - } catch (UnsupportedEncodingException e) { - LOG.error("ISO-8859-1 not supported?", e); - } - return result.toString(); - } - - private static boolean isHexDigit(char c) { - return - (c >= 'A' && c <= 'F') || - (c >= '0' && c <= '9'); - } - - /** - * Takes a ASCII digit in the range A-F0-9 and returns - * the corresponding integer/ordinal value. - * @param ch The hex digit. - * @return The converted hex value as a byte. - */ - public static byte toBinaryFromHex(byte ch) { - if ( ch >= 'A' && ch <= 'F' ) - return (byte) ((byte)10 + (byte) (ch - 'A')); - // else - return (byte) (ch - '0'); - } - - public static byte [] toBytesBinary(String in) { - // this may be bigger than we need, but lets be safe. 
- byte [] b = new byte[in.length()]; - int size = 0; - for (int i = 0; i < in.length(); ++i) { - char ch = in.charAt(i); - if (ch == '\\') { - // begin hex escape: - char next = in.charAt(i+1); - if (next != 'x') { - // invalid escape sequence, ignore this one. - b[size++] = (byte)ch; - continue; - } - // ok, take next 2 hex digits. - char hd1 = in.charAt(i+2); - char hd2 = in.charAt(i+3); - - // they need to be A-F0-9: - if (!isHexDigit(hd1) || - !isHexDigit(hd2)) { - // bogus escape code, ignore: - continue; - } - // turn hex ASCII digit -> number - byte d = (byte) ((toBinaryFromHex((byte)hd1) << 4) + toBinaryFromHex((byte)hd2)); - - b[size++] = d; - i += 3; // skip 3 - } else { - b[size++] = (byte) ch; - } - } - // resize: - byte [] b2 = new byte[size]; - System.arraycopy(b, 0, b2, 0, size); - return b2; - } - - /** - * Converts a string to a UTF-8 byte array. - * @param s string - * @return the byte array - */ - public static byte[] toBytes(String s) { - try { - return s.getBytes(HConstants.UTF8_ENCODING); - } catch (UnsupportedEncodingException e) { - LOG.error("UTF-8 not supported?", e); - return null; - } - } - - /** - * Convert a boolean to a byte array. True becomes -1 - * and false becomes 0. - * - * @param b value - * @return b encoded in a byte array. - */ - public static byte [] toBytes(final boolean b) { - return new byte[] { b ? (byte) -1 : (byte) 0 }; - } - - /** - * Reverses {@link #toBytes(boolean)} - * @param b array - * @return True or false. - */ - public static boolean toBoolean(final byte [] b) { - if (b.length != 1) { - throw new IllegalArgumentException("Array has wrong size: " + b.length); - } - return b[0] != (byte) 0; - } - - /** - * Convert a long value to a byte array using big-endian. - * - * @param val value to convert - * @return the byte array - */ - public static byte[] toBytes(long val) { - byte [] b = new byte[8]; - for (int i = 7; i > 0; i--) { - b[i] = (byte) val; - val >>>= 8; - } - b[0] = (byte) val; - return b; - } - - /** - * Converts a byte array to a long value. Reverses - * {@link #toBytes(long)} - * @param bytes array - * @return the long value - */ - public static long toLong(byte[] bytes) { - return toLong(bytes, 0, SIZEOF_LONG); - } - - /** - * Converts a byte array to a long value. Assumes there will be - * {@link #SIZEOF_LONG} bytes available. - * - * @param bytes bytes - * @param offset offset - * @return the long value - */ - public static long toLong(byte[] bytes, int offset) { - return toLong(bytes, offset, SIZEOF_LONG); - } - - /** - * Converts a byte array to a long value. - * - * @param bytes array of bytes - * @param offset offset into array - * @param length length of data (must be {@link #SIZEOF_LONG}) - * @return the long value - * @throws IllegalArgumentException if length is not {@link #SIZEOF_LONG} or - * if there's not enough room in the array at the offset indicated. 
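toStringBinary and toBytesBinary above are designed as inverses for printable debugging output: anything outside the printable set is hex-escaped as \xNN and parsed back on the return trip. For example:

import org.apache.hadoop.hbase.util.Bytes;

public class StringBinaryExample {
  public static void main(String[] args) {
    byte[] raw = new byte[] { 'k', 'e', 'y', 0x00, 0x05 };
    String printable = Bytes.toStringBinary(raw);
    System.out.println(printable);                 // key\x00\x05
    byte[] back = Bytes.toBytesBinary(printable);
    System.out.println(Bytes.equals(raw, back));   // true
  }
}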
- */ - public static long toLong(byte[] bytes, int offset, final int length) { - if (length != SIZEOF_LONG || offset + length > bytes.length) { - throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG); - } - long l = 0; - for(int i = offset; i < offset + length; i++) { - l <<= 8; - l ^= bytes[i] & 0xFF; - } - return l; - } - - private static IllegalArgumentException - explainWrongLengthOrOffset(final byte[] bytes, - final int offset, - final int length, - final int expectedLength) { - String reason; - if (length != expectedLength) { - reason = "Wrong length: " + length + ", expected " + expectedLength; - } else { - reason = "offset (" + offset + ") + length (" + length + ") exceed the" - + " capacity of the array: " + bytes.length; - } - return new IllegalArgumentException(reason); - } - - /** - * Put a long value out to the specified byte array position. - * @param bytes the byte array - * @param offset position in the array - * @param val long to write out - * @return incremented offset - * @throws IllegalArgumentException if the byte array given doesn't have - * enough room at the offset specified. - */ - public static int putLong(byte[] bytes, int offset, long val) { - if (bytes.length - offset < SIZEOF_LONG) { - throw new IllegalArgumentException("Not enough room to put a long at" - + " offset " + offset + " in a " + bytes.length + " byte array"); - } - for(int i = offset + 7; i > offset; i--) { - bytes[i] = (byte) val; - val >>>= 8; - } - bytes[offset] = (byte) val; - return offset + SIZEOF_LONG; - } - - /** - * Presumes float encoded as IEEE 754 floating-point "single format" - * @param bytes byte array - * @return Float made from passed byte array. - */ - public static float toFloat(byte [] bytes) { - return toFloat(bytes, 0); - } - - /** - * Presumes float encoded as IEEE 754 floating-point "single format" - * @param bytes array to convert - * @param offset offset into array - * @return Float made from passed byte array. - */ - public static float toFloat(byte [] bytes, int offset) { - return Float.intBitsToFloat(toInt(bytes, offset, SIZEOF_INT)); - } - - /** - * @param bytes byte array - * @param offset offset to write to - * @param f float value - * @return New offset in bytes - */ - public static int putFloat(byte [] bytes, int offset, float f) { - return putInt(bytes, offset, Float.floatToRawIntBits(f)); - } - - /** - * @param f float value - * @return the float represented as byte [] - */ - public static byte [] toBytes(final float f) { - // Encode it as int - return Bytes.toBytes(Float.floatToRawIntBits(f)); - } - - /** - * @param bytes byte array - * @return Return double made from passed bytes. - */ - public static double toDouble(final byte [] bytes) { - return toDouble(bytes, 0); - } - - /** - * @param bytes byte array - * @param offset offset where double is - * @return Return double made from passed bytes. - */ - public static double toDouble(final byte [] bytes, final int offset) { - return Double.longBitsToDouble(toLong(bytes, offset, SIZEOF_LONG)); - } - - /** - * @param bytes byte array - * @param offset offset to write to - * @param d value - * @return New offset into array bytes - */ - public static int putDouble(byte [] bytes, int offset, double d) { - return putLong(bytes, offset, Double.doubleToLongBits(d)); - } - - /** - * Serialize a double as the IEEE 754 double format output. The resultant - * array will be 8 bytes long. 
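toBytes(long) above writes big-endian (most significant byte first) and toLong reverses it, with the explicit length/offset checks throwing rather than silently misreading. A round trip:

import org.apache.hadoop.hbase.util.Bytes;

public class LongCodecExample {
  public static void main(String[] args) {
    byte[] b = Bytes.toBytes(255L);
    // Big-endian: seven zero bytes, then 0xFF.
    System.out.println(Bytes.toStringBinary(b)); // \x00\x00\x00\x00\x00\x00\x00\xFF
    System.out.println(Bytes.toLong(b));         // 255
  }
}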
- * - * @param d value - * @return the double represented as byte [] - */ - public static byte [] toBytes(final double d) { - // Encode it as a long - return Bytes.toBytes(Double.doubleToRawLongBits(d)); - } - - /** - * Convert an int value to a byte array - * @param val value - * @return the byte array - */ - public static byte[] toBytes(int val) { - byte [] b = new byte[4]; - for(int i = 3; i > 0; i--) { - b[i] = (byte) val; - val >>>= 8; - } - b[0] = (byte) val; - return b; - } - - /** - * Converts a byte array to an int value - * @param bytes byte array - * @return the int value - */ - public static int toInt(byte[] bytes) { - return toInt(bytes, 0, SIZEOF_INT); - } - - /** - * Converts a byte array to an int value - * @param bytes byte array - * @param offset offset into array - * @return the int value - */ - public static int toInt(byte[] bytes, int offset) { - return toInt(bytes, offset, SIZEOF_INT); - } - - /** - * Converts a byte array to an int value - * @param bytes byte array - * @param offset offset into array - * @param length length of int (has to be {@link #SIZEOF_INT}) - * @return the int value - * @throws IllegalArgumentException if length is not {@link #SIZEOF_INT} or - * if there's not enough room in the array at the offset indicated. - */ - public static int toInt(byte[] bytes, int offset, final int length) { - if (length != SIZEOF_INT || offset + length > bytes.length) { - throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT); - } - int n = 0; - for(int i = offset; i < (offset + length); i++) { - n <<= 8; - n ^= bytes[i] & 0xFF; - } - return n; - } - - /** - * Put an int value out to the specified byte array position. - * @param bytes the byte array - * @param offset position in the array - * @param val int to write out - * @return incremented offset - * @throws IllegalArgumentException if the byte array given doesn't have - * enough room at the offset specified. - */ - public static int putInt(byte[] bytes, int offset, int val) { - if (bytes.length - offset < SIZEOF_INT) { - throw new IllegalArgumentException("Not enough room to put an int at" - + " offset " + offset + " in a " + bytes.length + " byte array"); - } - for(int i= offset + 3; i > offset; i--) { - bytes[i] = (byte) val; - val >>>= 8; - } - bytes[offset] = (byte) val; - return offset + SIZEOF_INT; - } - - /** - * Convert a short value to a byte array of {@link #SIZEOF_SHORT} bytes long. - * @param val value - * @return the byte array - */ - public static byte[] toBytes(short val) { - byte[] b = new byte[SIZEOF_SHORT]; - b[1] = (byte) val; - val >>= 8; - b[0] = (byte) val; - return b; - } - - /** - * Converts a byte array to a short value - * @param bytes byte array - * @return the short value - */ - public static short toShort(byte[] bytes) { - return toShort(bytes, 0, SIZEOF_SHORT); - } - - /** - * Converts a byte array to a short value - * @param bytes byte array - * @param offset offset into array - * @return the short value - */ - public static short toShort(byte[] bytes, int offset) { - return toShort(bytes, offset, SIZEOF_SHORT); - } - - /** - * Converts a byte array to a short value - * @param bytes byte array - * @param offset offset into array - * @param length length, has to be {@link #SIZEOF_SHORT} - * @return the short value - * @throws IllegalArgumentException if length is not {@link #SIZEOF_SHORT} - * or if there's not enough room in the array at the offset indicated. 
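The putXxx writers (putInt above, putShort just below) return the incremented offset so values can be packed back-to-back into one buffer:

import org.apache.hadoop.hbase.util.Bytes;

public class PackedBufferExample {
  public static void main(String[] args) {
    byte[] buf = new byte[Bytes.SIZEOF_INT + Bytes.SIZEOF_SHORT];
    int off = Bytes.putInt(buf, 0, 42);        // off is now 4
    off = Bytes.putShort(buf, off, (short) 7); // off is now 6
    System.out.println(Bytes.toInt(buf, 0));                  // 42
    System.out.println(Bytes.toShort(buf, Bytes.SIZEOF_INT)); // 7
  }
}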
- */ - public static short toShort(byte[] bytes, int offset, final int length) { - if (length != SIZEOF_SHORT || offset + length > bytes.length) { - throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT); - } - short n = 0; - n ^= bytes[offset] & 0xFF; - n <<= 8; - n ^= bytes[offset+1] & 0xFF; - return n; - } - - /** - * This method will get a sequence of bytes from pos -> limit, - * but will restore pos after. - * @param buf - * @return byte array - */ - public static byte[] getBytes(ByteBuffer buf) { - int savedPos = buf.position(); - byte [] newBytes = new byte[buf.remaining()]; - buf.get(newBytes); - buf.position(savedPos); - return newBytes; - } - - /** - * Put a short value out to the specified byte array position. - * @param bytes the byte array - * @param offset position in the array - * @param val short to write out - * @return incremented offset - * @throws IllegalArgumentException if the byte array given doesn't have - * enough room at the offset specified. - */ - public static int putShort(byte[] bytes, int offset, short val) { - if (bytes.length - offset < SIZEOF_SHORT) { - throw new IllegalArgumentException("Not enough room to put a short at" - + " offset " + offset + " in a " + bytes.length + " byte array"); - } - bytes[offset+1] = (byte) val; - val >>= 8; - bytes[offset] = (byte) val; - return offset + SIZEOF_SHORT; - } - - /** - * Convert a BigDecimal value to a byte array - * - * @param val - * @return the byte array - */ - public static byte[] toBytes(BigDecimal val) { - byte[] valueBytes = val.unscaledValue().toByteArray(); - byte[] result = new byte[valueBytes.length + SIZEOF_INT]; - int offset = putInt(result, 0, val.scale()); - putBytes(result, offset, valueBytes, 0, valueBytes.length); - return result; - } - - - /** - * Converts a byte array to a BigDecimal - * - * @param bytes - * @return the char value - */ - public static BigDecimal toBigDecimal(byte[] bytes) { - return toBigDecimal(bytes, 0, bytes.length); - } - - /** - * Converts a byte array to a BigDecimal value - * - * @param bytes - * @param offset - * @param length - * @return the char value - */ - public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) { - if (bytes == null || length < SIZEOF_INT + 1 || - (offset + length > bytes.length)) { - return null; - } - - int scale = toInt(bytes, offset); - byte[] tcBytes = new byte[length - SIZEOF_INT]; - System.arraycopy(bytes, offset + SIZEOF_INT, tcBytes, 0, length - SIZEOF_INT); - return new BigDecimal(new BigInteger(tcBytes), scale); - } - - /** - * Put a BigDecimal value out to the specified byte array position. - * - * @param bytes the byte array - * @param offset position in the array - * @param val BigDecimal to write out - * @return incremented offset - */ - public static int putBigDecimal(byte[] bytes, int offset, BigDecimal val) { - if (bytes == null) { - return offset; - } - - byte[] valueBytes = val.unscaledValue().toByteArray(); - byte[] result = new byte[valueBytes.length + SIZEOF_INT]; - offset = putInt(result, offset, val.scale()); - return putBytes(result, offset, valueBytes, 0, valueBytes.length); - } - - /** - * @param vint Integer to make a vint of. - * @return Vint as bytes array. 
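toBytes(BigDecimal) above encodes a 4-byte scale followed by the unscaled value's two's-complement bytes, and toBigDecimal reverses it:

import java.math.BigDecimal;
import org.apache.hadoop.hbase.util.Bytes;

public class BigDecimalCodecExample {
  public static void main(String[] args) {
    byte[] encoded = Bytes.toBytes(new BigDecimal("12.50"));
    System.out.println(Bytes.toBigDecimal(encoded)); // 12.50
  }
}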
- */ - public static byte [] vintToBytes(final long vint) { - long i = vint; - int size = WritableUtils.getVIntSize(i); - byte [] result = new byte[size]; - int offset = 0; - if (i >= -112 && i <= 127) { - result[offset] = (byte) i; - return result; - } - - int len = -112; - if (i < 0) { - i ^= -1L; // take one's complement' - len = -120; - } - - long tmp = i; - while (tmp != 0) { - tmp = tmp >> 8; - len--; - } - - result[offset++] = (byte) len; - - len = (len < -120) ? -(len + 120) : -(len + 112); - - for (int idx = len; idx != 0; idx--) { - int shiftbits = (idx - 1) * 8; - long mask = 0xFFL << shiftbits; - result[offset++] = (byte)((i & mask) >> shiftbits); - } - return result; - } - - /** - * @param buffer buffer to convert - * @return vint bytes as an integer. - */ - public static long bytesToVint(final byte [] buffer) { - int offset = 0; - byte firstByte = buffer[offset++]; - int len = WritableUtils.decodeVIntSize(firstByte); - if (len == 1) { - return firstByte; - } - long i = 0; - for (int idx = 0; idx < len-1; idx++) { - byte b = buffer[offset++]; - i = i << 8; - i = i | (b & 0xFF); - } - return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); - } - - /** - * Reads a zero-compressed encoded long from input stream and returns it. - * @param buffer Binary array - * @param offset Offset into array at which vint begins. - * @throws java.io.IOException e - * @return deserialized long from stream. - */ - public static long readVLong(final byte [] buffer, final int offset) - throws IOException { - byte firstByte = buffer[offset]; - int len = WritableUtils.decodeVIntSize(firstByte); - if (len == 1) { - return firstByte; - } - long i = 0; - for (int idx = 0; idx < len-1; idx++) { - byte b = buffer[offset + 1 + idx]; - i = i << 8; - i = i | (b & 0xFF); - } - return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); - } - - /** - * @param left left operand - * @param right right operand - * @return 0 if equal, < 0 if left is less than right, etc. - */ - public static int compareTo(final byte [] left, final byte [] right) { - return LexicographicalComparerHolder.BEST_COMPARER. - compareTo(left, 0, left.length, right, 0, right.length); - } - - /** - * Lexicographically compare two arrays. - * - * @param buffer1 left operand - * @param buffer2 right operand - * @param offset1 Where to start comparing in the left buffer - * @param offset2 Where to start comparing in the right buffer - * @param length1 How much to compare from the left buffer - * @param length2 How much to compare from the right buffer - * @return 0 if equal, < 0 if left is less than right, etc. - */ - public static int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { - return LexicographicalComparerHolder.BEST_COMPARER. - compareTo(buffer1, offset1, length1, buffer2, offset2, length2); - } - - interface Comparer { - abstract public int compareTo(T buffer1, int offset1, int length1, - T buffer2, int offset2, int length2); - } - - @VisibleForTesting - static Comparer lexicographicalComparerJavaImpl() { - return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; - } - - /** - * Provides a lexicographical comparer implementation; either a Java - * implementation or a faster implementation based on {@link Unsafe}. - * - *
- * <p>
Uses reflection to gracefully fall back to the Java implementation if - * {@code Unsafe} isn't available. - */ - @VisibleForTesting - static class LexicographicalComparerHolder { - static final String UNSAFE_COMPARER_NAME = - LexicographicalComparerHolder.class.getName() + "$UnsafeComparer"; - - static final Comparer BEST_COMPARER = getBestComparer(); - /** - * Returns the Unsafe-using Comparer, or falls back to the pure-Java - * implementation if unable to do so. - */ - static Comparer getBestComparer() { - try { - Class theClass = Class.forName(UNSAFE_COMPARER_NAME); - - // yes, UnsafeComparer does implement Comparer - @SuppressWarnings("unchecked") - Comparer comparer = - (Comparer) theClass.getEnumConstants()[0]; - return comparer; - } catch (Throwable t) { // ensure we really catch *everything* - return lexicographicalComparerJavaImpl(); - } - } - - enum PureJavaComparer implements Comparer { - INSTANCE; - - @Override - public int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { - // Short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { - return 0; - } - // Bring WritableComparator code local - int end1 = offset1 + length1; - int end2 = offset2 + length2; - for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { - int a = (buffer1[i] & 0xff); - int b = (buffer2[j] & 0xff); - if (a != b) { - return a - b; - } - } - return length1 - length2; - } - } - - @VisibleForTesting - enum UnsafeComparer implements Comparer { - INSTANCE; - - static final Unsafe theUnsafe; - - /** The offset to the first element in a byte array. */ - static final int BYTE_ARRAY_BASE_OFFSET; - - static { - theUnsafe = (Unsafe) AccessController.doPrivileged( - new PrivilegedAction() { - @Override - public Object run() { - try { - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - return f.get(null); - } catch (NoSuchFieldException e) { - // It doesn't matter what we throw; - // it's swallowed in getBestComparer(). - throw new Error(); - } catch (IllegalAccessException e) { - throw new Error(); - } - } - }); - - BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); - - // sanity check - this should never fail - if (theUnsafe.arrayIndexScale(byte[].class) != 1) { - throw new AssertionError(); - } - } - - static final boolean littleEndian = - ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN); - - /** - * Returns true if x1 is less than x2, when both values are treated as - * unsigned. - */ - static boolean lessThanUnsigned(long x1, long x2) { - return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE); - } - - /** - * Lexicographically compare two arrays. - * - * @param buffer1 left operand - * @param buffer2 right operand - * @param offset1 Where to start comparing in the left buffer - * @param offset2 Where to start comparing in the right buffer - * @param length1 How much to compare from the left buffer - * @param length2 How much to compare from the right buffer - * @return 0 if equal, < 0 if left is less than right, etc. 
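lessThanUnsigned above leans on a classic trick: adding Long.MIN_VALUE to both operands flips the sign bit, turning one signed comparison into an unsigned one. Isolated here for illustration:

public class UnsignedCompareExample {
  // Same trick as UnsafeComparer.lessThanUnsigned.
  static boolean lessThanUnsigned(long x1, long x2) {
    return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
  }

  public static void main(String[] args) {
    // Interpreted as unsigned 64-bit values, -1L is the maximum.
    System.out.println(lessThanUnsigned(1L, -1L));      // true
    System.out.println(lessThanUnsigned(0x7FL, 0x80L)); // true
  }
}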
- */ - @Override - public int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { - // Short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { - return 0; - } - int minLength = Math.min(length1, length2); - int minWords = minLength / SIZEOF_LONG; - int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; - int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; - - /* - * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a - * time is no slower than comparing 4 bytes at a time even on 32-bit. - * On the other hand, it is substantially faster on 64-bit. - */ - for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) { - long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i); - long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i); - long diff = lw ^ rw; - - if (diff != 0) { - if (!littleEndian) { - return lessThanUnsigned(lw, rw) ? -1 : 1; - } - - // Use binary search - int n = 0; - int y; - int x = (int) diff; - if (x == 0) { - x = (int) (diff >>> 32); - n = 32; - } - - y = x << 16; - if (y == 0) { - n += 16; - } else { - x = y; - } - - y = x << 8; - if (y == 0) { - n += 8; - } - return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); - } - } - - // The epilogue to cover the last (minLength % 8) elements. - for (int i = minWords * SIZEOF_LONG; i < minLength; i++) { - int a = (buffer1[offset1 + i] & 0xff); - int b = (buffer2[offset2 + i] & 0xff); - if (a != b) { - return a - b; - } - } - return length1 - length2; - } - } - } - - /** - * @param left left operand - * @param right right operand - * @return True if equal - */ - public static boolean equals(final byte [] left, final byte [] right) { - // Could use Arrays.equals? - //noinspection SimplifiableConditionalExpression - if (left == right) return true; - if (left == null || right == null) return false; - if (left.length != right.length) return false; - if (left.length == 0) return true; - - // Since we're often comparing adjacent sorted data, - // it's usual to have equal arrays except for the very last byte - // so check that first - if (left[left.length - 1] != right[right.length - 1]) return false; - - return compareTo(left, right) == 0; - } - - public static boolean equals(final byte[] left, int leftOffset, int leftLen, - final byte[] right, int rightOffset, int rightLen) { - // short circuit case - if (left == right && - leftOffset == rightOffset && - leftLen == rightLen) { - return true; - } - // different lengths fast check - if (leftLen != rightLen) { - return false; - } - if (leftLen == 0) { - return true; - } - - // Since we're often comparing adjacent sorted data, - // it's usual to have equal arrays except for the very last byte - // so check that first - if (left[leftOffset + leftLen - 1] != right[rightOffset + rightLen - 1]) return false; - - return LexicographicalComparerHolder.BEST_COMPARER. - compareTo(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0; - } - - - /** - * Return true if the byte array on the right is a prefix of the byte - * array on the left. - */ - public static boolean startsWith(byte[] bytes, byte[] prefix) { - return bytes != null && prefix != null && - bytes.length >= prefix.length && - LexicographicalComparerHolder.BEST_COMPARER. - compareTo(bytes, 0, prefix.length, prefix, 0, prefix.length) == 0; - } - - /** - * @param b bytes to hash - * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the - * passed in array. 
This method is what {@link org.apache.hadoop.io.Text} and - * {@link ImmutableBytesWritable} use calculating hash code. - */ - public static int hashCode(final byte [] b) { - return hashCode(b, b.length); - } - - /** - * @param b value - * @param length length of the value - * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the - * passed in array. This method is what {@link org.apache.hadoop.io.Text} and - * {@link ImmutableBytesWritable} use calculating hash code. - */ - public static int hashCode(final byte [] b, final int length) { - return WritableComparator.hashBytes(b, length); - } - - /** - * @param b bytes to hash - * @return A hash of b as an Integer that can be used as key in - * Maps. - */ - public static Integer mapKey(final byte [] b) { - return hashCode(b); - } - - /** - * @param b bytes to hash - * @param length length to hash - * @return A hash of b as an Integer that can be used as key in - * Maps. - */ - public static Integer mapKey(final byte [] b, final int length) { - return hashCode(b, length); - } - - /** - * @param a lower half - * @param b upper half - * @return New array that has a in lower half and b in upper half. - */ - public static byte [] add(final byte [] a, final byte [] b) { - return add(a, b, HConstants.EMPTY_BYTE_ARRAY); - } - - /** - * @param a first third - * @param b second third - * @param c third third - * @return New array made from a, b and c - */ - public static byte [] add(final byte [] a, final byte [] b, final byte [] c) { - byte [] result = new byte[a.length + b.length + c.length]; - System.arraycopy(a, 0, result, 0, a.length); - System.arraycopy(b, 0, result, a.length, b.length); - System.arraycopy(c, 0, result, a.length + b.length, c.length); - return result; - } - - /** - * @param a array - * @param length amount of bytes to grab - * @return First length bytes from a - */ - public static byte [] head(final byte [] a, final int length) { - if (a.length < length) { - return null; - } - byte [] result = new byte[length]; - System.arraycopy(a, 0, result, 0, length); - return result; - } - - /** - * @param a array - * @param length amount of bytes to snarf - * @return Last length bytes from a - */ - public static byte [] tail(final byte [] a, final int length) { - if (a.length < length) { - return null; - } - byte [] result = new byte[length]; - System.arraycopy(a, a.length - length, result, 0, length); - return result; - } - - /** - * @param a array - * @param length new array size - * @return Value in a plus length prepended 0 bytes - */ - public static byte [] padHead(final byte [] a, final int length) { - byte [] padding = new byte[length]; - for (int i = 0; i < length; i++) { - padding[i] = 0; - } - return add(padding,a); - } - - /** - * @param a array - * @param length new array size - * @return Value in a plus length appended 0 bytes - */ - public static byte [] padTail(final byte [] a, final int length) { - byte [] padding = new byte[length]; - for (int i = 0; i < length; i++) { - padding[i] = 0; - } - return add(a,padding); - } - - /** - * Split passed range. Expensive operation relatively. Uses BigInteger math. - * Useful splitting ranges for MapReduce jobs. - * @param a Beginning of range - * @param b End of range - * @param num Number of times to split range. Pass 1 if you want to split - * the range in two; i.e. one split. 
- * @return Array of dividing values - */ - public static byte [][] split(final byte [] a, final byte [] b, final int num) { - return split(a, b, false, num); - } - - /** - * Split passed range. Expensive operation relatively. Uses BigInteger math. - * Useful splitting ranges for MapReduce jobs. - * @param a Beginning of range - * @param b End of range - * @param inclusive Whether the end of range is prefix-inclusive or is - * considered an exclusive boundary. Automatic splits are generally exclusive - * and manual splits with an explicit range utilize an inclusive end of range. - * @param num Number of times to split range. Pass 1 if you want to split - * the range in two; i.e. one split. - * @return Array of dividing values - */ - public static byte[][] split(final byte[] a, final byte[] b, - boolean inclusive, final int num) { - byte[][] ret = new byte[num + 2][]; - int i = 0; - Iterable<byte[]> iter = iterateOnSplits(a, b, inclusive, num); - if (iter == null) - return null; - for (byte[] elem : iter) { - ret[i++] = elem; - } - return ret; - } - - /** - * Iterate over keys within the passed range, splitting at an [a,b) boundary. - */ - public static Iterable<byte[]> iterateOnSplits(final byte[] a, - final byte[] b, final int num) - { - return iterateOnSplits(a, b, false, num); - } - - /** - * Iterate over keys within the passed range. - */ - public static Iterable<byte[]> iterateOnSplits( - final byte[] a, final byte[]b, boolean inclusive, final int num) - { - byte [] aPadded; - byte [] bPadded; - if (a.length < b.length) { - aPadded = padTail(a, b.length - a.length); - bPadded = b; - } else if (b.length < a.length) { - aPadded = a; - bPadded = padTail(b, a.length - b.length); - } else { - aPadded = a; - bPadded = b; - } - if (compareTo(aPadded,bPadded) >= 0) { - throw new IllegalArgumentException("b <= a"); - } - if (num <= 0) { - throw new IllegalArgumentException("num cannot be <= 0"); - } - byte [] prependHeader = {1, 0}; - final BigInteger startBI = new BigInteger(add(prependHeader, aPadded)); - final BigInteger stopBI = new BigInteger(add(prependHeader, bPadded)); - BigInteger diffBI = stopBI.subtract(startBI); - if (inclusive) { - diffBI = diffBI.add(BigInteger.ONE); - } - final BigInteger splitsBI = BigInteger.valueOf(num + 1); - if(diffBI.compareTo(splitsBI) < 0) { - return null; - } - final BigInteger intervalBI; - try { - intervalBI = diffBI.divide(splitsBI); - } catch(Exception e) { - LOG.error("Exception caught during division", e); - return null; - } - - final Iterator<byte[]> iterator = new Iterator<byte[]>() { - private int i = -1; - - @Override - public boolean hasNext() { - return i < num+1; - } - - @Override - public byte[] next() { - i++; - if (i == 0) return a; - if (i == num + 1) return b; - - BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i))); - byte [] padded = curBI.toByteArray(); - if (padded[1] == 0) - padded = tail(padded, padded.length - 2); - else - padded = tail(padded, padded.length - 1); - return padded; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - }; - - return new Iterable<byte[]>() { - @Override - public Iterator<byte[]> iterator() { - return iterator; - } - }; - } - - /** - * @param bytes array to hash - * @param offset offset to start from - * @param length length to hash - * */ - public static int hashCode(byte[] bytes, int offset, int length) { - int hash = 1; - for (int i = offset; i < offset + length; i++) - hash = (31 * hash) + (int) bytes[i]; - return hash; - } - - /** - * @param t operands - * @return Array of byte arrays
made from passed array of Text - */ - public static byte [][] toByteArrays(final String [] t) { - byte [][] result = new byte[t.length][]; - for (int i = 0; i < t.length; i++) { - result[i] = Bytes.toBytes(t[i]); - } - return result; - } - - /** - * @param column operand - * @return A byte array of a byte array where first and only entry is - * column - */ - public static byte [][] toByteArrays(final String column) { - return toByteArrays(toBytes(column)); - } - - /** - * @param column operand - * @return A byte array of a byte array where first and only entry is - * column - */ - public static byte [][] toByteArrays(final byte [] column) { - byte [][] result = new byte[1][]; - result[0] = column; - return result; - } - - /** - * Binary search for keys in indexes. - * - * @param arr array of byte arrays to search for - * @param key the key you want to find - * @param offset the offset in the key you want to find - * @param length the length of the key - * @param comparator a comparator to compare. - * @return zero-based index of the key, if the key is present in the array. - * Otherwise, a value -(i + 1) such that the key is between arr[i - - * 1] and arr[i] non-inclusively, where i is in [0, N], if we define - * arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above - * means that this function can return 2N + 1 different values - * ranging from -(N + 1) to N - 1. - */ - public static int binarySearch(byte [][]arr, byte []key, int offset, - int length, RawComparator<byte []> comparator) { - int low = 0; - int high = arr.length - 1; - - while (low <= high) { - int mid = (low+high) >>> 1; - // we have to compare in this order, because the comparator order - // has special logic when the 'left side' is a special key. - int cmp = comparator.compare(key, offset, length, - arr[mid], 0, arr[mid].length); - // key lives above the midpoint - if (cmp > 0) - low = mid + 1; - // key lives below the midpoint - else if (cmp < 0) - high = mid - 1; - // BAM. how often does this really happen? - else - return mid; - } - return - (low+1); - } - - /** - * Bytewise binary increment/decrement of long contained in byte array - * on given amount. - * - * @param value - array of bytes containing long (length <= SIZEOF_LONG) - * @param amount value will be incremented on (decremented if negative) - * @return array of bytes containing incremented long (length == SIZEOF_LONG) - */ - public static byte [] incrementBytes(byte[] value, long amount) - { - byte[] val = value; - if (val.length < SIZEOF_LONG) { - // Hopefully this doesn't happen too often.
- byte [] newvalue; - if (val[0] < 0) { - newvalue = new byte[]{-1, -1, -1, -1, -1, -1, -1, -1}; - } else { - newvalue = new byte[SIZEOF_LONG]; - } - System.arraycopy(val, 0, newvalue, newvalue.length - val.length, - val.length); - val = newvalue; - } else if (val.length > SIZEOF_LONG) { - throw new IllegalArgumentException("Increment Bytes - value too big: " + - val.length); - } - if(amount == 0) return val; - if(val[0] < 0){ - return binaryIncrementNeg(val, amount); - } - return binaryIncrementPos(val, amount); - } - - /* increment/decrement for positive value */ - private static byte [] binaryIncrementPos(byte [] value, long amount) { - long amo = amount; - int sign = 1; - if (amount < 0) { - amo = -amount; - sign = -1; - } - for(int i=0;i<value.length;i++) { - int cur = ((int)amo % 256) * sign; - amo = (amo >> 8); - int val = value[value.length-i-1] & 0x0ff; - int total = val + cur; - if(total > 255) { - amo += sign; - total %= 256; - } else if (total < 0) { - amo -= sign; - } - value[value.length-i-1] = (byte)total; - if (amo == 0) return value; - } - return value; - } - - /* increment/decrement for negative value */ - private static byte [] binaryIncrementNeg(byte [] value, long amount) { - long amo = amount; - int sign = 1; - if (amount < 0) { - amo = -amount; - sign = -1; - } - for(int i=0;i<value.length;i++) { - int cur = ((int)amo % 256) * sign; - amo = (amo >> 8); - int val = ((~value[value.length-i-1]) & 0x0ff) + 1; - int total = cur - val; - if(total >= 0) { - amo += sign; - } else if (total < -256) { - amo -= sign; - total %= 256; - } - value[value.length-i-1] = (byte)total; - if (amo == 0) return value; - } - return value; - } - - /** - * Writes a string as a fixed-size field, padded with zeros. - */ - public static void writeStringFixedSize(final DataOutput out, String s, - int size) throws IOException { - byte[] b = toBytes(s); - if (b.length > size) { - throw new IOException("Trying to write " + b.length + " bytes (" + - toStringBinary(b) + ") into a field of length " + size); - } - - out.writeBytes(s); - for (int i = 0; i < size - s.length(); ++i) - out.writeByte(0); - } - - /** - * Reads a fixed-size field and interprets it as a string padded with zeros. - */ - public static String readStringFixedSize(final DataInput in, int size) - throws IOException { - byte[] b = new byte[size]; - in.readFully(b); - int n = b.length; - while (n > 0 && b[n - 1] == 0) - --n; - - return toString(b, 0, n); - } - -}
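Since Bytes moves into hbase-common wholesale, a short usage sketch may help reviewers sanity-check that the relocated semantics stay intact. This is illustrative only and not part of the patch: the Bytes calls are taken from the listing above, while the class name and the chosen values are hypothetical. Run with java -ea so the assertions fire.

    import java.util.Arrays;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical sanity checks for the Bytes utilities being moved; run with -ea.
    public class BytesMoveSanityCheck {
      public static void main(String[] args) throws Exception {
        // Lexicographic comparison via the Unsafe-or-pure-Java comparer.
        assert Bytes.compareTo(new byte[] {'a'}, new byte[] {'b'}) < 0;

        // Variable-length long encoding round-trips through readVLong/bytesToVint.
        byte[] vint = Bytes.vintToBytes(130L);   // 2 bytes: length marker, then payload
        assert Bytes.readVLong(vint, 0) == 130L;
        assert Bytes.bytesToVint(vint) == 130L;

        // One split of [{0}, {100}] returns both endpoints plus the midpoint {50}.
        byte[][] splits = Bytes.split(new byte[] {0}, new byte[] {100}, 1);
        assert splits.length == 3 && splits[1][0] == 50;

        // incrementBytes treats the array as a big-endian long and pads to 8 bytes.
        byte[] incremented = Bytes.incrementBytes(new byte[] {0, 0, 0, 0, 0, 0, 0, 1}, 1);
        assert incremented[7] == 2;

        System.out.println("ok: " + Arrays.toString(splits[1]));
      }
    }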
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java deleted file mode 100644 index fb16ad8..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import org.apache.commons.logging.LogFactory; -import java.io.PrintWriter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.VersionAnnotation; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.commons.logging.Log; - -/** - * This class finds the package info for hbase and the VersionAnnotation - * information. Taken from hadoop. Only name of annotation is different. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class VersionInfo { - private static final Log LOG = LogFactory.getLog(VersionInfo.class.getName()); - private static Package myPackage; - private static VersionAnnotation version; - - static { - myPackage = VersionAnnotation.class.getPackage(); - version = myPackage.getAnnotation(VersionAnnotation.class); - } - - /** - * Get the meta-data for the hbase package. - * @return package - */ - static Package getPackage() { - return myPackage; - } - - /** - * Get the hbase version. - * @return the hbase version string, eg. "0.6.3-dev" - */ - public static String getVersion() { - return version != null ? version.version() : "Unknown"; - } - - /** - * Get the subversion revision number for the root directory - * @return the revision number, eg. "451451" - */ - public static String getRevision() { - return version != null ? version.revision() : "Unknown"; - } - - /** - * The date that hbase was compiled. - * @return the compilation date in unix date format - */ - public static String getDate() { - return version != null ? version.date() : "Unknown"; - } - - /** - * The user that compiled hbase. - * @return the username of the user - */ - public static String getUser() { - return version != null ? version.user() : "Unknown"; - } - - /** - * Get the subversion URL for the root hbase directory. - * @return the url - */ - public static String getUrl() { - return version != null ? version.url() : "Unknown"; - } - - static String[] versionReport() { - return new String[] { - "HBase " + getVersion(), - "Subversion " + getUrl() + " -r " + getRevision(), - "Compiled by " + getUser() + " on " + getDate() - }; - } - - public static void writeTo(PrintWriter out) { - for (String line : versionReport()) { - out.println(line); - } - } - - public static void logVersion() { - for (String line : versionReport()) { - LOG.info(line); - } - } - - public static void main(String[] args) { - logVersion(); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/LargeTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/LargeTests.java deleted file mode 100644 index f1b46fa..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/LargeTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2011 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -/** - * Tag a test as 'large', meaning that the test class has the following - * characteristics: - * - executed in an isolated JVM. Tests can however be executed in different - * JVM on the same machine simultaneously. - * - will not have to be executed by the developer before submitting a bug - * - ideally, last less than 2 minutes to help parallelization - * - * It the worst case compared to small or medium, use it only for tests that - * you cannot put in the other categories - * - * @see SmallTests - * @see MediumTests - */ -public interface LargeTests { -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MediumTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MediumTests.java deleted file mode 100644 index bbbde7c..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MediumTests.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2011 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -/** - * Tag a test as 'Medium', meaning that the test class has the following - * characteristics: - * - executed in an isolated JVM. Tests can however be executed in different - * JVM on the same machine simultaneously. - * - will have to be executed by the developer before submitting a bug - * - ideally, last less than 1 minutes to help parallelization - * - * Use it for tests that cannot be tagged as 'Small'. - * - * @see SmallTests - * @see LargeTests - */ -public interface MediumTests { -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SmallTests.java deleted file mode 100644 index c702f5a..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SmallTests.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2011 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -/** - * Tag a test as 'small', meaning that the test class has the following - * characteristics: - * - can be run simultaneously with other small tests in the same JVM - * - ideally, last less than 15 seconds - * - does not use a cluster - * - * @see MediumTests - * @see LargeTests - */ -public interface SmallTests { -}
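For context on the three tag interfaces deleted above (this diff only shows their removal from hbase-server; their new home is not visible in these hunks): they are plain marker interfaces consumed by JUnit's @Category annotation, which is how HBase sorts its tests into small/medium/large buckets. A hypothetical example of how a test class is tagged:

    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.hbase.SmallTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Hypothetical test class; surefire profiles can include or exclude it by category.
    @Category(SmallTests.class)
    public class TestSomethingSmall {
      @Test
      public void runsQuicklyWithoutACluster() {
        assertTrue(true); // a small test: no cluster, well under 15 seconds
      }
    }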
diff --git a/hbase-site/src/docbkx/developer.xml b/hbase-site/src/docbkx/developer.xml index 3b90ca2..a2595dc 100644 --- a/hbase-site/src/docbkx/developer.xml +++ b/hbase-site/src/docbkx/developer.xml @@ -69,18 +69,27 @@ git clone git://git.apache.org/hbase.git If you cloned the project via git, download and install the Git plugin (EGit). Attach to your local git repo (via the Git Repositories window) and you'll be able to see file revision history, generate patches, etc. - HBase Project Setup - To set up your Eclipse environment for HBase, close Eclipse and execute... -mvn eclipse:eclipse - + HBase Project Setup in Eclipse + The easiest way is to use the m2eclipse plugin for Eclipse. Eclipse Indigo or newer has m2eclipse built-in, or it can be found here: http://www.eclipse.org/m2e/. M2Eclipse provides Maven integration for Eclipse - it even lets you run Maven commands directly from within Eclipse to compile and test your project. + To import the project, go to File->Import...->Maven->Existing Maven Projects and then point Eclipse at the HBase root directory; m2eclipse will automatically find all the hbase modules for you. + If you install m2eclipse and import HBase into your workspace, you will have to fix your Eclipse Build Path. + Remove the target folder, and add the target/generated-jamon + and target/generated-sources/java folders. You may also remove from your Build Path + the exclusions on src/main/resources and src/test/resources + to avoid error messages in the console such as 'Failed to execute goal org.apache.maven.plugins:maven-antrun-plugin:1.6:run (default) on project hbase: + 'An Ant BuildException has occured: Replace: source file .../target/classes/hbase-default.xml doesn't exist'. This will also + reduce the Eclipse build cycles and make your life easier when developing.
+
+ + Import into Eclipse from the command line + For those not inclined to use m2eclipse, you can generate the Eclipse files from the command line. First, run (you should only have to do this once): + mvn clean install -DskipTests + and then close Eclipse and execute... + mvn eclipse:eclipse ... from your local HBase project directory in your workspace to generate some new .project and .classpath files. Then reopen Eclipse, and import the .project file in the HBase directory into your workspace. +
-
- Maven Plugin - Download and install the Maven plugin. For example, Help -> Install New Software -> (search for Maven Plugin) -
Maven Classpath Variable The M2_REPO classpath variable needs to be set up for the project. This needs to be set to @@ -95,16 +104,6 @@ Unbound classpath variable: 'M2_REPO/com/google/guava/guava/r09/guava-r09.jar' i Unbound classpath variable: 'M2_REPO/com/google/protobuf/protobuf-java/2.3.0/protobuf-java-2.3.0.jar' in project 'hbase' hbase Build path Build Path Problem Unbound classpath variable:
-
- Import via m2eclipse - If you install the m2eclipse and import the HBase pom.xml in your workspace, you will have to fix your eclipse Build Path. - Remove target folder, add target/generated-jamon - and target/generated-sources/java folders. You may also remove from your Build Path - the exclusions on the src/main/resources and src/test/resources - to avoid error message in the console 'Failed to execute goal org.apache.maven.plugins:maven-antrun-plugin:1.6:run (default) on project hbase: - 'An Ant BuildException has occured: Replace: source file .../target/classes/hbase-default.xml doesn't exist'. This will also - reduce the eclipse build cycles and make your life easier when developing. -
Eclipse Known Issues Eclipse will currently complain about Bytes.java. It is not possible to turn these errors off. @@ -617,6 +616,21 @@ mvn compile This convention comes from our parent project Hadoop.
+
+ + Running In-Situ + If you are developing HBase, it is frequently useful to test your changes against a more realistic cluster than what unit tests provide. For this, HBase can be run directly from the source tree in local mode. + All you need to do is run: + + ${HBASE_HOME}/bin/start-hbase.sh + + This will spin up a full local cluster, just as if you had packaged HBase and installed it on your machine. + + Keep in mind that you will need to have installed HBase into your local Maven repository for the in-situ cluster to work properly. That is, you will need to run: + mvn clean install -DskipTests + to ensure that Maven can find the correct classpath and dependencies. Generally, the above command + is a good first thing to try whenever Maven is acting oddly.
+
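To make the in-situ workflow above concrete, here is a minimal, hypothetical smoke test you could run against the local cluster. It assumes start-hbase.sh is already running and that a table 't1' with column family 'f1' was created beforehand (for example via the HBase shell); the class name, table, and values are illustrative, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical smoke test against the in-situ cluster started above.
    public class InSituSmoke {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up local-mode defaults
        HTable table = new HTable(conf, "t1");            // assumes 't1'/'f1' already exist
        Put put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("hello"));
        table.put(put);
        table.close();
      }
    }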
diff --git a/pom.xml b/pom.xml index f8fdbfb..cb7ca1c 100644 --- a/pom.xml +++ b/pom.xml @@ -56,6 +56,7 @@ hbase-assembly hbase-server hbase-site + hbase-common @@ -730,6 +731,11 @@ + org.apache.hbase + hbase-common + ${project.version} + + hbase-server org.apache.hbase ${project.version}
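Finally, with hbase-common registered as a module and pinned in dependencyManagement above, any module that declares the dependency can use the relocated classes directly. A hypothetical sketch (the class name and the printed property are illustrative; HBaseConfiguration and VersionInfo are the classes this patch moves into hbase-common):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.VersionInfo;

    // Hypothetical consumer of the new hbase-common module.
    public class CommonConsumer {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();            // now lives in hbase-common
        System.out.println("hbase.version = " + VersionInfo.getVersion()); // likewise moved
        System.out.println("zk quorum = " + conf.get("hbase.zookeeper.quorum"));
      }
    }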