Index: hbase-server/src/main/ruby/hbase/table.rb
===================================================================
--- hbase-server/src/main/ruby/hbase/table.rb	(revision 1382616)
+++ hbase-server/src/main/ruby/hbase/table.rb	(working copy)
@@ -113,6 +113,7 @@
       @table = org.apache.hadoop.hbase.client.HTable.new(configuration, table_name)
       @name = table_name
       @shell = shell
+      @converters = Hash.new()
     end

     # Note the below methods are prefixed with '_' to hide them from the average user, as
@@ -187,7 +188,8 @@
     def _get_internal(row, *args)
       get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes)
       maxlength = -1
-
+      @converters.clear()
+
       # Normalize args
       args = args.first if args.first.kind_of?(Hash)
       if args.kind_of?(String) || args.kind_of?(Array)
@@ -299,6 +301,7 @@
       limit = args.delete("LIMIT") || -1
       maxlength = args.delete("MAXLENGTH") || -1

+      @converters.clear()
       if args.any?
         filter = args["FILTER"]
@@ -450,6 +453,7 @@
     # Returns family and (when has it) qualifier for a column name
     def parse_column_name(column)
       split = org.apache.hadoop.hbase.KeyValue.parseColumn(column.to_java_bytes)
+      set_converter(split) if split.length > 1
       return split[0], (split.length > 1) ? split[1] : nil
     end
@@ -474,9 +478,42 @@
       if kv.isDelete
         val = "timestamp=#{kv.getTimestamp}, type=#{org.apache.hadoop.hbase.KeyValue::Type::codeToType(kv.getType)}"
       else
-        val = "timestamp=#{kv.getTimestamp}, value=#{org.apache.hadoop.hbase.util.Bytes::toStringBinary(kv.getValue)}"
+        val = "timestamp=#{kv.getTimestamp}, value=#{convert(column, kv)}"
       end
       (maxlength != -1) ? val[0, maxlength] : val
     end
+
+    def convert(column, kv)
+      # use org.apache.hadoop.hbase.util.Bytes as the default class
+      klazz_name = 'org.apache.hadoop.hbase.util.Bytes'
+      # use org.apache.hadoop.hbase.util.Bytes::toStringBinary as the default converter
+      converter = 'toStringBinary'
+      if @converters.has_key?(column)
+        # look up the CONVERTER registered for this column - "cf:qualifier"
+        matches = /c\((.+)\)\.(.+)/.match(@converters[column])
+        if matches.nil?
+          # does not match the 'c(className).functionName' pattern,
+          # so keep the default klazz_name
+          converter = @converters[column]
+        else
+          klazz_name = matches[1]
+          converter = matches[2]
+        end
+      end
+      method = eval(klazz_name).method(converter)
+      return method.call(kv.getValue) # apply the converter
+    end
+
+    # If the column spec carries CONVERTER information, strip the :CONVERTER part from the column pair:
+    # 1. return the normal column pair as usual, i.e. "cf:qualifier[:CONVERTER]" yields "cf" and "qualifier" only
+    # 2. register the CONVERTER under its column spec - "cf:qualifier"
+    def set_converter(column)
+      family = String.from_java_bytes(column[0])
+      parts = org.apache.hadoop.hbase.KeyValue.parseColumn(column[1])
+      if parts.length > 1
+        @converters["#{family}:#{String.from_java_bytes(parts[0])}"] = String.from_java_bytes(parts[1])
+        column[1] = parts[0]
+      end
+    end
   end
 end
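The dispatch in convert() above comes down to three steps: match the registered spec against the 'c(className).functionName' pattern, fall back to a default class when only a bare function name was given, then resolve and call the method. Below is a minimal plain-Ruby sketch of that logic; the Formatter class and Object.const_get are stand-ins (not part of this patch) for the shell's default Bytes class and its eval-based lookup:

  class Formatter
    def self.toStringBinary(bytes)
      bytes.bytes.map { |b| (32..126).include?(b) ? b.chr : "\\x%02X" % b }.join
    end

    def self.toInt(bytes)
      bytes.unpack('N').first
    end
  end

  def resolve(spec, default_klazz = 'Formatter')
    matches = /c\((.+)\)\.(.+)/.match(spec)
    return [default_klazz, spec] if matches.nil?   # bare function name, default class
    [matches[1], matches[2]]                       # explicit 'c(klass).function'
  end

  klazz, fun = resolve('toInt')
  puts Object.const_get(klazz).method(fun).call([1024].pack('N'))   # => 1024
  klazz, fun = resolve('c(Formatter).toStringBinary')
  puts Object.const_get(klazz).method(fun).call([1024].pack('N'))   # => \x00\x00\x04\x00
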
either "converterFun" alone (e.g, toInt, toString) + 2. or a complete pattern of 'c(MyConverterClass).converterFun'. +While using the 1st. CONVERTER definition, it chooses "org.apache.hadoop.hbase.util.Bytes" as the default class. +Example: + hbase> get 't1', 'r1' {COLUMN => ['cf:qualifier:toInt', 'cf:qualifier:c(org.apache.hadoop.hbase.util.Bytes).toInt'] } + +ATTENTION!The above feature cannot be applied on the entire column family, +since it is too confusing to distinguish "cf:qualifier" and "cf:CONVERTER" + + The same commands also can be run on a reference to a table (obtained via get_table or create_table). Suppose you had a reference t to table 't1', the corresponding commands would be: Index: hbase-server/src/main/ruby/shell/commands/scan.rb =================================================================== --- hbase-server/src/main/ruby/shell/commands/scan.rb (revision 1382616) +++ hbase-server/src/main/ruby/shell/commands/scan.rb (working copy) @@ -58,6 +58,19 @@ hbase> scan 't1', {RAW => true, VERSIONS => 10} +Besides the default 'toStringBinary' format, scan also supports customized output format for each column. +User can define a CONVERTER, which follows the general column spec of "cf:qualifier", like "cf:qualifier[:CONVERTER]". +The CONVERTER can be formatted as + 1. either "converterFun" alone (e.g, toInt, toString) + 2. or a complete pattern of 'c(MyConverterClass).converterFun'. +While using the 1st. CONVERTER definition, it chooses "org.apache.hadoop.hbase.util.Bytes" as the default class. +Example: + hbase> scan 't1', {COLUMNS => ['cf:qualifier:toInt', 'cf:qualifier:c(org.apache.hadoop.hbase.util.Bytes).toInt'] } + +ATTENTION!The above feature cannot be applied on the entire column family, +since it is too confusing to distinguish "cf:qualifier" and "cf:CONVERTER" + + Scan can also be used directly from a table, by first getting a reference to a table, like such: hbase> t = get_table 't' @@ -65,6 +78,7 @@ Note in the above situation, you can still provide all the filtering, columns, options, etc as described above. + EOF end Index: hbase-server/src/test/ruby/hbase/table_test.rb =================================================================== --- hbase-server/src/test/ruby/hbase/table_test.rb (revision 1382616) +++ hbase-server/src/test/ruby/hbase/table_test.rb (working copy) @@ -311,6 +311,22 @@ @test_table._get_internal('1') { |col, val| res[col] = val } assert_equal(res.keys.sort, [ 'x:a', 'x:b' ]) end + + define_test "get should support COLUMNS with value CONVERTER information" do + @test_table.put(1, "x:c", [1024].pack('N')) + @test_table.put(1, "x:d", [98].pack('N')) + begin + res = @test_table._get_internal('1', ['x:c:toInt'], ['x:d:c(org.apache.hadoop.hbase.util.Bytes).toInt']) + assert_not_nil(res) + assert_kind_of(Hash, res) + assert_not_nil(/value=1024/.match(res['x:c'])) + assert_not_nil(/value=98/.match(res['x:d'])) + ensure + # clean up newly added columns for this test only. 
+ @test_table.delete(1, "x:c") + @test_table.delete(1, "x:d") + end + end #------------------------------------------------------------------------------- @@ -417,5 +433,22 @@ res = @test_table._scan_internal { |row, cells| rows[row] = cells } assert_equal(rows.keys.size, res) end + + define_test "scan should support COLUMNS with value CONVERTER information" do + @test_table.put(1, "x:c", [1024].pack('N')) + @test_table.put(1, "x:d", [98].pack('N')) + begin + res = @test_table._scan_internal COLUMNS => ['x:c:toInt', 'x:d:c(org.apache.hadoop.hbase.util.Bytes).toInt'] + assert_not_nil(res) + assert_kind_of(Hash, res) + assert_not_nil(/value=1024/.match(res['1']['x:c'])) + assert_not_nil(/value=98/.match(res['1']['x:d'])) + ensure + # clean up newly added columns for this test only. + @test_table.delete(1, "x:c") + @test_table.delete(1, "x:d") + end +end + end end