From ef650c2c9648cf4dba5f4983b77fa46f869d0586 Mon Sep 17 00:00:00 2001 From: haxiaolin Date: Wed, 4 Jul 2018 13:33:47 +0800 Subject: [PATCH] HBASE-20819 Use TableDescriptor to replace HTableDescriptor in hbase-shell module --- hbase-shell/src/main/ruby/hbase/admin.rb | 207 +++++++++++++-------------- hbase-shell/src/main/ruby/hbase_constants.rb | 6 +- 2 files changed, 104 insertions(+), 109 deletions(-) diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index c385d3dabf..85ae6c4ffa 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -25,6 +25,8 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter java_import org.apache.hadoop.hbase.util.Bytes java_import org.apache.hadoop.hbase.ServerName java_import org.apache.hadoop.hbase.TableName +java_import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder +java_import org.apache.hadoop.hbase.client.TableDescriptorBuilder # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin @@ -362,7 +364,7 @@ module Hbase has_columns = false # Start defining the table - htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name)) + tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(table_name)) splits = nil # Args are either columns or splits, add them to the table definition # TODO: add table options support @@ -375,20 +377,20 @@ module Hbase if arg.is_a?(String) || arg.key?(NAME) # If the arg is a string, default action is to add a column to the table. # If arg has a name, it must also be a column descriptor. - descriptor = hcd(arg, htd) + descriptor = hcd(arg, tdb) # Warn if duplicate columns are added - if htd.hasFamily(descriptor.getName) + if tdb.build.hasColumnFamily(descriptor.getName) puts "Family '" + descriptor.getNameAsString + "' already exists, the old one will be replaced" - htd.modifyFamily(descriptor) + tdb.modifyColumnFamily(descriptor) else - htd.addFamily(descriptor) + tdb.setColumnFamily(descriptor) end has_columns = true next end if arg.key?(REGION_REPLICATION) region_replication = JInteger.valueOf(arg.delete(REGION_REPLICATION)) - htd.setRegionReplication(region_replication) + tdb.setRegionReplication(region_replication) end # Get rid of the "METHOD", which is deprecated for create. @@ -408,7 +410,7 @@ module Hbase File.foreach(splits_file) do |line| arg[SPLITS].push(line.chomp) end - htd.setValue(SPLITS_FILE, arg[SPLITS_FILE]) + tdb.setValue(SPLITS_FILE, arg[SPLITS_FILE]) end if arg.key?(SPLITS) @@ -429,7 +431,7 @@ module Hbase end # Done with splits; apply formerly-table_att parameters. - update_htd_from_arg(htd, arg) + update_tdb_from_arg(tdb, arg) arg.each_key do |ignored_key| puts(format('An argument ignored (unknown or overridden): %s', ignored_key)) @@ -439,13 +441,8 @@ module Hbase # Fail if no column families defined raise(ArgumentError, 'Table must have at least one column family') unless has_columns - if splits.nil? 
- # Perform the create table call - @admin.createTable(htd) - else - # Perform the create table call - @admin.createTable(htd, splits) - end + # Perform the create table call + @admin.createTable(tdb.build, splits) end #---------------------------------------------------------------------------------------------- @@ -614,7 +611,7 @@ module Hbase table_name = TableName.valueOf(table_name_str) # Get table descriptor - htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getTableDescriptor(table_name)) + tdb = TableDescriptorBuilder.newBuilder(@admin.getTableDescriptor(table_name)) hasTableUpdate = false # Process all args @@ -629,14 +626,14 @@ module Hbase # 1) Column family spec. Distinguished by having a NAME and no METHOD. method = arg.delete(METHOD) if method.nil? && arg.key?(NAME) - descriptor = hcd(arg, htd) + descriptor = hcd(arg, tdb) column_name = descriptor.getNameAsString # If column already exist, then try to alter it. Create otherwise. - if htd.hasFamily(column_name.to_java_bytes) - htd.modifyFamily(descriptor) + if tdb.build.hasColumnFamily(column_name.to_java_bytes) + tdb.modifyColumnFamily(descriptor) else - htd.addFamily(descriptor) + tdb.setColumnFamily(descriptor) end hasTableUpdate = true next @@ -648,23 +645,21 @@ module Hbase # Delete column family if method == 'delete' raise(ArgumentError, 'NAME parameter missing for delete method') unless name - htd.removeFamily(name.to_java_bytes) + tdb.removeColumnFamily(name.to_java_bytes) hasTableUpdate = true # Unset table attributes elsif method == 'table_att_unset' raise(ArgumentError, 'NAME parameter missing for table_att_unset method') unless name if name.is_a?(Array) name.each do |key| - if htd.getValue(key).nil? + if tdb.setValue(key, nil).nil? raise ArgumentError, "Could not find attribute: #{key}" end - htd.remove(key) end else - if htd.getValue(name).nil? + if tdb.setValue(name, nil).nil? raise ArgumentError, "Could not find attribute: #{name}" end - htd.remove(name) end hasTableUpdate = true # Unset table configuration @@ -672,16 +667,14 @@ module Hbase raise(ArgumentError, 'NAME parameter missing for table_conf_unset method') unless name if name.is_a?(Array) name.each do |key| - if htd.getConfigurationValue(key).nil? + if tdb.setValue(key, nil).nil? raise ArgumentError, "Could not find configuration: #{key}" end - htd.removeConfiguration(key) end else - if htd.getConfigurationValue(name).nil? + if tdb.setValue(name, nil).nil? raise ArgumentError, "Could not find configuration: #{name}" end - htd.removeConfiguration(name) end hasTableUpdate = true # Unknown method @@ -697,7 +690,7 @@ module Hbase end # 3) Some args for the table, optionally with METHOD => table_att (deprecated) - update_htd_from_arg(htd, arg) + update_tdb_from_arg(tdb, arg) # set a coprocessor attribute valid_coproc_keys = [] @@ -710,7 +703,7 @@ module Hbase v = String.new(value) v.strip! # TODO: We should not require user to config the coprocessor with our inner format. - htd.addCoprocessorWithSpec(v) + tdb.addCoprocessorWithSpec(v) valid_coproc_keys << key end @@ -729,7 +722,7 @@ module Hbase # Bulk apply all table modifications. if hasTableUpdate - @admin.modifyTable(table_name, htd) + @admin.modifyTable(table_name, tdb.build) if wait == true puts 'Updating all regions with the new schema...' 
@@ -848,101 +841,103 @@ module Hbase end #---------------------------------------------------------------------------------------------- - # Return a new HColumnDescriptor made of passed args - def hcd(arg, htd) + # Return a new ColumnFamilyDescriptor made of passed args + def hcd(arg, tdb) # String arg, single parameter constructor - return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.is_a?(String) + + return ColumnFamilyDescriptorBuilder.of(arg) if arg.is_a?(String) raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME) - family = htd.getFamily(name.to_java_bytes) + cfd = tdb.build.getColumnFamily(name.to_java_bytes) + unless cfd.nil? + cfdb = ColumnFamilyDescriptorBuilder.newBuilder(cfd) + end # create it if it's a new family - family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes) - - family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE) - family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE) - family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE) - family.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE) - family.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE) - family.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE) - family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) - family.setInMemoryCompaction( - org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)) + cfdb ||= ColumnFamilyDescriptorBuilder.newBuilder(name.to_java_bytes) + + cfdb.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::BLOCKCACHE))) if arg.include?(ColumnFamilyDescriptorBuilder::BLOCKCACHE) + cfdb.setScope(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE))) if arg.include?(ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE) + cfdb.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE) + cfdb.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE) + cfdb.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE) + cfdb.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE))) if 
arg.include?(ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE) + cfdb.setInMemory(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IN_MEMORY))) if arg.include?(ColumnFamilyDescriptorBuilder::IN_MEMORY) + if arg.include?(ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION) + cfdb.setInMemoryCompaction( + org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION)) ) end - family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) - family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) - family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) - family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS) - family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS) - family.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS) - family.setCompressTags(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS) - family.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN) - family.setMobEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB) - family.setMobThreshold(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD) - family.setNewVersionBehavior(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR) - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER) - bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase.to_sym + cfdb.setTimeToLive(arg.delete(ColumnFamilyDescriptorBuilder::TTL)) if arg.include?(ColumnFamilyDescriptorBuilder::TTL) + cfdb.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING))) if arg.include?(ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING) + cfdb.setBlocksize(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::BLOCKSIZE))) if arg.include?(ColumnFamilyDescriptorBuilder::BLOCKSIZE) + cfdb.setMaxVersions(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::VERSIONS))) if arg.include?(ColumnFamilyDescriptorBuilder::VERSIONS) + cfdb.setMinVersions(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::MIN_VERSIONS))) if 
arg.include?(ColumnFamilyDescriptorBuilder::MIN_VERSIONS) + cfdb.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS) + cfdb.setCompressTags(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::COMPRESS_TAGS))) if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESS_TAGS) + cfdb.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN) + cfdb.setMobEnabled(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IS_MOB))) if arg.include?(ColumnFamilyDescriptorBuilder::IS_MOB) + cfdb.setMobThreshold(JLong.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::MOB_THRESHOLD))) if arg.include?(ColumnFamilyDescriptorBuilder::MOB_THRESHOLD) + cfdb.setNewVersionBehavior(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR))) if arg.include?(ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR) + if arg.include?(ColumnFamilyDescriptorBuilder::BLOOMFILTER) + bloomtype = arg.delete(ColumnFamilyDescriptorBuilder::BLOOMFILTER).upcase.to_sym if org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype) - family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype)) + cfdb.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype)) else raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(' ')) end end - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION) - compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase.to_sym + if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION) + compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION).upcase.to_sym if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression) - family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression)) + cfdb.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression)) else raise(ArgumentError, "Compression #{compression} is not supported. 
Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' ')) end end - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION) - algorithm = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION).upcase - family.setEncryptionType(algorithm) - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY) + if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION) + algorithm = arg.delete(ColumnFamilyDescriptorBuilder::ENCRYPTION).upcase + cfdb.setEncryptionType(algorithm) + if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY) key = org.apache.hadoop.hbase.io.crypto.Encryption.pbkdf128( - arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY) + arg.delete(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY) ) - family.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key, + cfdb.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key, algorithm)) end end - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT) - compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT).upcase.to_sym + if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT) + compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT).upcase.to_sym if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression) - family.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression)) + cfdb.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression)) else raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' ')) end end - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY) - storage_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY).upcase - family.setStoragePolicy(storage_policy) + if arg.include?(ColumnFamilyDescriptorBuilder::STORAGE_POLICY) + storage_policy = arg.delete(ColumnFamilyDescriptorBuilder::STORAGE_POLICY).upcase + cfdb.setStoragePolicy(storage_policy) end - if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY) - mob_partition_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym - if org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.include?(mob_partition_policy) - family.setMobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.valueOf(mob_partition_policy)) + if arg.include?(ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY) + mob_partition_policy = arg.delete(ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym + if MobCompactPartitionPolicy.constants.include?(mob_partition_policy) + cfdb.setMobCompactPartitionPolicy(MobCompactPartitionPolicy.valueOf(mob_partition_policy)) else - raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.join(' ')) + raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. 
Use one of " + MobCompactPartitionPolicy.constants.join(' ')) end end - set_user_metadata(family, arg.delete(METADATA)) if arg[METADATA] - set_descriptor_config(family, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] - if arg.include?(org.apache.hadoop.hbase - .HColumnDescriptor::DFS_REPLICATION) - family.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase - .HColumnDescriptor::DFS_REPLICATION))) + set_user_metadata(cfdb, arg.delete(METADATA)) if arg[METADATA] + set_descriptor_config(cfdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + if arg.include?(ColumnFamilyDescriptorBuilder::DFS_REPLICATION) + cfdb.setDFSReplication(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::DFS_REPLICATION))) end arg.each_key do |unknown_key| puts(format('Unknown argument ignored for column family %s: %s', name, unknown_key)) end - family + cfdb.build end #---------------------------------------------------------------------------------------------- @@ -1096,7 +1091,7 @@ module Hbase raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.is_a?(Hash) for k, v in config v = v.to_s unless v.nil? - descriptor.setConfiguration(k, v) + descriptor.setValue(k, v) end end @@ -1230,22 +1225,22 @@ module Hbase @admin.getLocks end - # Parse arguments and update HTableDescriptor accordingly - def update_htd_from_arg(htd, arg) - htd.setOwnerString(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::OWNER)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::OWNER) - htd.setMaxFileSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE) - htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY) - htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED) - htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED) - htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE) - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY) - htd.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY) - htd.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY) - htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION) - htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY) - htd.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION))) if 
arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION) - set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] - set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + # Parse arguments and update TableDescriptorBuilder accordingly + def update_tdb_from_arg(tdb, arg) + tdb.setOwnerString(arg.delete(TableDescriptorBuilder::OWNER)) if arg.include?(TableDescriptorBuilder::OWNER) + tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE) + tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY) + tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED) + tdb.setNormalizationEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZATION_ENABLED))) if arg.include?(TableDescriptorBuilder::NORMALIZATION_ENABLED) + tdb.setMemStoreFlushSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE))) if arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE) + tdb.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(TableDescriptorBuilder::DURABILITY))) if arg.include?(TableDescriptorBuilder::DURABILITY) + tdb.setPriority(JInteger.valueOf(arg.delete(TableDescriptorBuilder::PRIORITY))) if arg.include?(TableDescriptorBuilder::PRIORITY) + tdb.setFlushPolicyClassName(arg.delete(TableDescriptorBuilder::FLUSH_POLICY)) if arg.include?(TableDescriptorBuilder::FLUSH_POLICY) + tdb.setRegionMemStoreReplication(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION))) if arg.include?(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION) + tdb.setRegionSplitPolicyClassName(arg.delete(TableDescriptorBuilder::SPLIT_POLICY)) if arg.include?(TableDescriptorBuilder::SPLIT_POLICY) + tdb.setRegionReplication(JInteger.valueOf(arg.delete(TableDescriptorBuilder::REGION_REPLICATION))) if arg.include?(TableDescriptorBuilder::REGION_REPLICATION) + set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA] + set_descriptor_config(tdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] end #---------------------------------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index 28484cbc57..9a967d837d 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -39,7 +39,7 @@ module HBaseConstants NAME = org.apache.hadoop.hbase.HConstants::NAME VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY - IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION + IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION METADATA = org.apache.hadoop.hbase.HConstants::METADATA STOPROW = 'STOPROW'.freeze STARTROW = 'STARTROW'.freeze @@ -100,8 +100,8 @@ module HBaseConstants end end - promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants) - promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants) + promote_constants(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.constants) + promote_constants(org.apache.hadoop.hbase.client.TableDescriptorBuilder.constants) end # Include classes definition -- 2.14.1
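
Note (not part of the patch): a minimal JRuby sketch of the builder-based flow that the patched create() path now follows -- build an immutable ColumnFamilyDescriptor, attach it to a TableDescriptorBuilder, and hand the built TableDescriptor to Admin#createTable. It assumes it runs inside the HBase shell's JRuby runtime (classes on the classpath, an Admin handle available); the table name 'example_table', the family name 'cf1', and the local variable names are illustrative only.

    java_import org.apache.hadoop.hbase.TableName
    java_import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
    java_import org.apache.hadoop.hbase.client.TableDescriptorBuilder

    # Column family: builder is mutable, build() yields an immutable descriptor.
    cfd = ColumnFamilyDescriptorBuilder.newBuilder('cf1'.to_java_bytes)
                                       .setMaxVersions(3)
                                       .build

    # Table: attach the family, then build the immutable TableDescriptor.
    td = TableDescriptorBuilder.newBuilder(TableName.valueOf('example_table'))
                               .setColumnFamily(cfd)
                               .build

    # In admin.rb, @admin is the org.apache.hadoop.hbase.client.Admin instance;
    # createTable takes the built TableDescriptor (optionally with split keys,
    # as in the patched create() call).
    # @admin.createTable(td)

The design point the patch relies on is that build() produces an immutable descriptor, which is why admin.rb keeps the mutable builder (tdb/cfdb) around while applying shell arguments and only calls build() at the final createTable/modifyTable step.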