From 758e611dbcf8def166be2bbe5581f60252431373 Mon Sep 17 00:00:00 2001
From: haxiaolin
Date: Tue, 3 Jul 2018 18:23:49 +0800
Subject: [PATCH] HBASE-20819 Use TableDescriptor to replace HTableDescriptor
 in hbase-shell module

---
 hbase-shell/src/main/ruby/hbase/admin.rb     | 200 ++++++++++++++-------------
 hbase-shell/src/main/ruby/hbase_constants.rb |   6 +-
 2 files changed, 104 insertions(+), 102 deletions(-)

diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index c385d3dabf..adbe2ea54c 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -362,7 +362,7 @@ module Hbase
       has_columns = false
       # Start defining the table
-      htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+      tdb = org.apache.hadoop.hbase.client.TableDescriptorBuilder.newBuilder(org.apache.hadoop.hbase.TableName.valueOf(table_name))
       splits = nil
       # Args are either columns or splits, add them to the table definition
       # TODO: add table options support
@@ -375,20 +375,20 @@ module Hbase
         if arg.is_a?(String) || arg.key?(NAME)
           # If the arg is a string, default action is to add a column to the table.
           # If arg has a name, it must also be a column descriptor.
-          descriptor = hcd(arg, htd)
+          descriptor = hcd(arg, tdb)
           # Warn if duplicate columns are added
-          if htd.hasFamily(descriptor.getName)
+          if tdb.build.hasColumnFamily(descriptor.getName)
             puts "Family '" + descriptor.getNameAsString + "' already exists, the old one will be replaced"
-            htd.modifyFamily(descriptor)
+            tdb.modifyColumnFamily(descriptor)
           else
-            htd.addFamily(descriptor)
+            tdb.addColumnFamily(descriptor)
           end
           has_columns = true
           next
         end
         if arg.key?(REGION_REPLICATION)
           region_replication = JInteger.valueOf(arg.delete(REGION_REPLICATION))
-          htd.setRegionReplication(region_replication)
+          tdb.setRegionReplication(region_replication)
         end

         # Get rid of the "METHOD", which is deprecated for create.
@@ -408,7 +408,7 @@ module Hbase
           File.foreach(splits_file) do |line|
             arg[SPLITS].push(line.chomp)
           end
-          htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
+          tdb.setValue(SPLITS_FILE, arg[SPLITS_FILE])
         end

         if arg.key?(SPLITS)
@@ -429,7 +429,7 @@ module Hbase
         end

         # Done with splits; apply formerly-table_att parameters.
-        update_htd_from_arg(htd, arg)
+        update_htd_from_arg(tdb, arg)

         arg.each_key do |ignored_key|
           puts(format('An argument ignored (unknown or overridden): %s', ignored_key))
@@ -441,10 +441,10 @@ module Hbase

       if splits.nil?
         # Perform the create table call
-        @admin.createTable(htd)
+        @admin.createTable(tdb.build)
       else
         # Perform the create table call
-        @admin.createTable(htd, splits)
+        @admin.createTable(tdb.build, splits)
       end
     end
@@ -614,7 +614,7 @@ module Hbase
       table_name = TableName.valueOf(table_name_str)

       # Get table descriptor
-      htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getTableDescriptor(table_name))
+      tdb = org.apache.hadoop.hbase.client.TableDescriptorBuilder.newBuilder(@admin.getTableDescriptor(table_name))
       hasTableUpdate = false

       # Process all args
@@ -629,14 +629,14 @@ module Hbase
         # 1) Column family spec. Distinguished by having a NAME and no METHOD.
         method = arg.delete(METHOD)
         if method.nil? && arg.key?(NAME)
-          descriptor = hcd(arg, htd)
+          descriptor = hcd(arg, tdb)
           column_name = descriptor.getNameAsString
           # If column already exist, then try to alter it. Create otherwise.
-          if htd.hasFamily(column_name.to_java_bytes)
-            htd.modifyFamily(descriptor)
+          if tdb.build.hasColumnFamily(column_name.to_java_bytes)
+            tdb.modifyColumnFamily(descriptor)
           else
-            htd.addFamily(descriptor)
+            tdb.addColumnFamily(descriptor)
           end
           hasTableUpdate = true
           next
         end
@@ -648,23 +648,23 @@ module Hbase
         # Delete column family
         if method == 'delete'
           raise(ArgumentError, 'NAME parameter missing for delete method') unless name
-          htd.removeFamily(name.to_java_bytes)
+          tdb.removeColumnFamily(name.to_java_bytes)
           hasTableUpdate = true
         # Unset table attributes
         elsif method == 'table_att_unset'
           raise(ArgumentError, 'NAME parameter missing for table_att_unset method') unless name
           if name.is_a?(Array)
             name.each do |key|
-              if htd.getValue(key).nil?
+              if tdb.build.getValue(key).nil?
                 raise ArgumentError, "Could not find attribute: #{key}"
               end
-              htd.remove(key)
+              tdb.setValue(key, nil)
             end
           else
-            if htd.getValue(name).nil?
+            if tdb.build.getValue(name).nil?
              raise ArgumentError, "Could not find attribute: #{name}"
            end
-            htd.remove(name)
+            tdb.setValue(name, nil)
           end
           hasTableUpdate = true
         # Unset table configuration
@@ -672,16 +672,16 @@ module Hbase
         elsif method == 'table_conf_unset'
           raise(ArgumentError, 'NAME parameter missing for table_conf_unset method') unless name
           if name.is_a?(Array)
             name.each do |key|
-              if htd.getConfigurationValue(key).nil?
+              if tdb.build.getValue(key).nil?
                 raise ArgumentError, "Could not find configuration: #{key}"
               end
-              htd.removeConfiguration(key)
+              tdb.setValue(key, nil)
             end
           else
-            if htd.getConfigurationValue(name).nil?
+            if tdb.build.getValue(name).nil?
               raise ArgumentError, "Could not find configuration: #{name}"
             end
-            htd.removeConfiguration(name)
+            tdb.setValue(name, nil)
           end
           hasTableUpdate = true
         # Unknown method
@@ -697,7 +697,7 @@ module Hbase
         end

         # 3) Some args for the table, optionally with METHOD => table_att (deprecated)
-        update_htd_from_arg(htd, arg)
+        update_htd_from_arg(tdb, arg)

         # set a coprocessor attribute
         valid_coproc_keys = []
@@ -710,7 +710,7 @@ module Hbase
             v = String.new(value)
             v.strip!
             # TODO: We should not require user to config the coprocessor with our inner format.
-            htd.addCoprocessorWithSpec(v)
+            tdb.addCoprocessorWithSpec(v)
             valid_coproc_keys << key
           end
@@ -729,7 +729,7 @@ module Hbase

         # Bulk apply all table modifications.
         if hasTableUpdate
-          @admin.modifyTable(table_name, htd)
+          @admin.modifyTable(table_name, tdb.build)

           if wait == true
             puts 'Updating all regions with the new schema...'
@@ -848,101 +848,103 @@ module Hbase
     end

     #----------------------------------------------------------------------------------------------
-    # Return a new HColumnDescriptor made of passed args
-    def hcd(arg, htd)
+    # Return a new ColumnFamilyDescriptor made of passed args
+    def hcd(arg, tdb)
       # String arg, single parameter constructor
-      return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.is_a?(String)
+
+      return org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.of(arg) if arg.is_a?(String)

       raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME)

-      family = htd.getFamily(name.to_java_bytes)
+      cfd = tdb.build.getColumnFamily(name.to_java_bytes)
+      if cfd != nil
+        cfdb = org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.newBuilder(cfd)
+      end
       # create it if it's a new family
-      family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes)
-
-      family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
-      family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
-      family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
-      family.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE)
-      family.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE)
-      family.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE)
-      family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
-        family.setInMemoryCompaction(
-          org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))
+      cfdb ||= org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.newBuilder(name.to_java_bytes)
+
+      cfdb.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOCKCACHE)
+      cfdb.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE)
+      cfdb.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE)
+      cfdb.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE)
+      cfdb.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE)
+      cfdb.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE)
+      cfdb.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY)
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION)
+        cfdb.setInMemoryCompaction(
+          org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION))
         )
       end
-      family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
-      family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
-      family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
-      family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
-      family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)
-      family.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
-      family.setCompressTags(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS)
-      family.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN)
-      family.setMobEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB)
-      family.setMobThreshold(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD)
-      family.setNewVersionBehavior(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR)
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
-        bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase.to_sym
+      cfdb.setTimeToLive(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::TTL)) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::TTL)
+      cfdb.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING)
+      cfdb.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOCKSIZE)
+      cfdb.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::VERSIONS)
+      cfdb.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MIN_VERSIONS)
+      cfdb.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS)
+      cfdb.setCompressTags(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESS_TAGS))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESS_TAGS)
+      cfdb.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN)
+      cfdb.setMobEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IS_MOB))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IS_MOB)
+      cfdb.setMobThreshold(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MOB_THRESHOLD))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MOB_THRESHOLD)
+      cfdb.setNewVersionBehavior(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR))) if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR)
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOOMFILTER)
+        bloomtype = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::BLOOMFILTER).upcase.to_sym
         if org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
-          family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
+          cfdb.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
         else
           raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(' '))
         end
       end
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
-        compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase.to_sym
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESSION)
+        compression = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESSION).upcase.to_sym
         if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
-          family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+          cfdb.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
         else
           raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
         end
       end
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION)
-        algorithm = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION).upcase
-        family.setEncryptionType(algorithm)
-        if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::ENCRYPTION)
+        algorithm = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::ENCRYPTION).upcase
+        cfdb.setEncryptionType(algorithm)
+        if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY)
           key = org.apache.hadoop.hbase.io.crypto.Encryption.pbkdf128(
-            arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
+            arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY)
           )
-          family.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
+          cfdb.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
                                                                                           algorithm))
         end
       end
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT)
-        compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT).upcase.to_sym
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT)
+        compression = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT).upcase.to_sym
         if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
-          family.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+          cfdb.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
         else
           raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
         end
       end
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY)
-        storage_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY).upcase
-        family.setStoragePolicy(storage_policy)
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::STORAGE_POLICY)
+        storage_policy = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::STORAGE_POLICY).upcase
+        cfdb.setStoragePolicy(storage_policy)
       end
-      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY)
-        mob_partition_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY)
+        mob_partition_policy = arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym
         if org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.include?(mob_partition_policy)
-          family.setMobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.valueOf(mob_partition_policy))
+          cfdb.setMobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.valueOf(mob_partition_policy))
         else
           raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.join(' '))
         end
       end
-      set_user_metadata(family, arg.delete(METADATA)) if arg[METADATA]
-      set_descriptor_config(family, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
-      if arg.include?(org.apache.hadoop.hbase
-                      .HColumnDescriptor::DFS_REPLICATION)
-        family.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase
-                                                             .HColumnDescriptor::DFS_REPLICATION)))
+      set_user_metadata(cfdb, arg.delete(METADATA)) if arg[METADATA]
+      set_descriptor_config(cfdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+      if arg.include?(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::DFS_REPLICATION)
+        cfdb.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::DFS_REPLICATION)))
       end

       arg.each_key do |unknown_key|
         puts(format('Unknown argument ignored for column family %s: %s', name, unknown_key))
       end

-      family
+      cfdb.build
     end

     #----------------------------------------------------------------------------------------------
@@ -1096,7 +1098,7 @@ module Hbase
       raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.is_a?(Hash)
       for k, v in config
         v = v.to_s unless v.nil?
-        descriptor.setConfiguration(k, v)
+        descriptor.setValue(k, v)
       end
     end
@@ -1230,22 +1232,22 @@ module Hbase
       @admin.getLocks
     end

-    # Parse arguments and update HTableDescriptor accordingly
-    def update_htd_from_arg(htd, arg)
-      htd.setOwnerString(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::OWNER)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::OWNER)
-      htd.setMaxFileSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE)
-      htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY)
-      htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED)
-      htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED)
-      htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE)
-      htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY)
-      htd.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY)
-      htd.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)
-      htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION)
-      htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)
-      htd.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION)
-      set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
-      set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+    # Parse arguments and update TableDescriptorBuilder accordingly
+    def update_htd_from_arg(tdb, arg)
+      tdb.setOwnerString(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::OWNER)) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::OWNER)
+      tdb.setMaxFileSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::MAX_FILESIZE)
+      tdb.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::READONLY))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::READONLY)
+      tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::COMPACTION_ENABLED)
+      tdb.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::NORMALIZATION_ENABLED)
+      tdb.setMemStoreFlushSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::MEMSTORE_FLUSHSIZE))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)
+      tdb.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::DURABILITY))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::DURABILITY)
+      tdb.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::PRIORITY))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::PRIORITY)
+      tdb.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::FLUSH_POLICY)) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::FLUSH_POLICY)
+      tdb.setRegionMemStoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION)
+      tdb.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::SPLIT_POLICY)
+      tdb.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.client.TableDescriptorBuilder::REGION_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.client.TableDescriptorBuilder::REGION_REPLICATION)
+      set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA]
+      set_descriptor_config(tdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
     end

     #----------------------------------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index 28484cbc57..9a967d837d 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -39,7 +39,7 @@ module HBaseConstants
   NAME = org.apache.hadoop.hbase.HConstants::NAME
   VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
   IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
-  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
+  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION
   METADATA = org.apache.hadoop.hbase.HConstants::METADATA
   STOPROW = 'STOPROW'.freeze
   STARTROW = 'STARTROW'.freeze
@@ -100,8 +100,8 @@ module HBaseConstants
     end
   end

-  promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
-  promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
+  promote_constants(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.constants)
+  promote_constants(org.apache.hadoop.hbase.client.TableDescriptorBuilder.constants)
 end

 # Include classes definition
--
2.14.1
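
For reference, the builder pattern this patch moves the shell to, as a minimal
JRuby sketch (the table name 't1' and family name 'f1' are hypothetical; the
builder classes and methods are the HBase 2.x client API the patch itself calls):

  include Java

  java_import org.apache.hadoop.hbase.TableName
  java_import org.apache.hadoop.hbase.client.TableDescriptorBuilder
  java_import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder

  # Old style (deprecated): mutate HTableDescriptor/HColumnDescriptor in place.
  #   htd = org.apache.hadoop.hbase.HTableDescriptor.new(TableName.valueOf('t1'))
  #   htd.addFamily(org.apache.hadoop.hbase.HColumnDescriptor.new('f1'))

  # New style: accumulate settings on builders, then build immutable descriptors.
  cfd = ColumnFamilyDescriptorBuilder.newBuilder('f1'.to_java_bytes)
                                     .setMaxVersions(3)
                                     .build
  tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf('t1'))
  tdb.addColumnFamily(cfd)
  td = tdb.build # immutable TableDescriptor, the type Admin#createTable accepts

Each setter mutates the builder and returns it, which is why admin.rb can keep
a single tdb across all arguments and call tdb.build only at the
createTable/modifyTable boundary (or, as in hcd above, to read back the current
state of the descriptor being assembled).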