Index: hbase-handler/src/test/results/hbase_bulk.m.out
===================================================================
--- hbase-handler/src/test/results/hbase_bulk.m.out (revision 1065710)
+++ hbase-handler/src/test/results/hbase_bulk.m.out (working copy)
@@ -25,30 +25,28 @@
 POSTHOOK: Output: default@hbsort
 PREHOOK: query: -- this is a dummy table used for controlling how the input file
 -- for TotalOrderPartitioner is created
-create external table hbpartition(part_break string)
-row format serde
+create table hbpartition(part_break string)
+row format serde
 'org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe'
-stored as
-inputformat
+stored as
+inputformat
 'org.apache.hadoop.mapred.TextInputFormat'
-outputformat
+outputformat
 'org.apache.hadoop.hive.ql.io.HiveNullValueSequenceFileOutputFormat'
-location '/tmp/hbpartitions'
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: -- this is a dummy table used for controlling how the input file
 -- for TotalOrderPartitioner is created
-create external table hbpartition(part_break string)
-row format serde
+create table hbpartition(part_break string)
+row format serde
 'org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe'
-stored as
-inputformat
+stored as
+inputformat
 'org.apache.hadoop.mapred.TextInputFormat'
-outputformat
+outputformat
 'org.apache.hadoop.hive.ql.io.HiveNullValueSequenceFileOutputFormat'
-location '/tmp/hbpartitions'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@hbpartition
-PREHOOK: query: -- this should produce one file in /tmp/hbpartitions, but we do not
+PREHOOK: query: -- this should produce one file, but we do not
 -- know what it will be called, so we will copy it to a well known
 -- filename /tmp/hbpartition.lst
 insert overwrite table hbpartition
@@ -58,7 +56,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@hbpartition
-POSTHOOK: query: -- this should produce one file in /tmp/hbpartitions, but we do not
+POSTHOOK: query: -- this should produce one file, but we do not
 -- know what it will be called, so we will copy it to a well known
 -- filename /tmp/hbpartition.lst
 insert overwrite table hbpartition
@@ -69,7 +67,7 @@
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@hbpartition
 POSTHOOK: Lineage: hbpartition.part_break SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
- 1 1 139 hdfs://localhost.localdomain:33778/tmp/hbpartitions
+ 1 1 139 hdfs://localhost.localdomain:37780/build/ql/test/data/warehouse/hbpartition
 PREHOOK: query: -- this should produce three files in /tmp/hbsort/cf
 -- include some trailing blanks and nulls to make sure we handle them correctly
 insert overwrite table hbsort
@@ -98,7 +96,7 @@
 POSTHOOK: Lineage: hbsort.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: hbsort.val EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: hbsort.val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
- 1 3 23227 hdfs://localhost.localdomain:33778/tmp/hbsort/cf
+ 1 3 23380 hdfs://localhost.localdomain:37780/tmp/hbsort/cf
 PREHOOK: query: -- To get the files out to your local filesystem for loading into
 -- HBase, run mkdir -p /tmp/blah/cf, then uncomment and
 -- semicolon-terminate the line below before running this test:
Index: hbase-handler/src/test/results/hbase_stats.q.out
===================================================================
--- hbase-handler/src/test/results/hbase_stats.q.out (revision 1065710)
+++ hbase-handler/src/test/results/hbase_stats.q.out (working copy)
@@ -14,11 +14,11 @@
 POSTHOOK: Lineage: stats_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: stats_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table stats_src compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_src
 PREHOOK: Output: default@stats_src
 POSTHOOK: query: analyze table stats_src compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_src
 POSTHOOK: Output: default@stats_src
 POSTHOOK: Lineage: stats_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -37,18 +37,14 @@
 # Detailed Table Information
 Database: default
 Owner: null
-CreateTime: Sun Oct 17 21:42:31 PDT 2010
+CreateTime: Tue Jan 25 14:48:20 PST 2011
 LastAccessTime: UNKNOWN
 Protect Mode: None
 Retention: 0
-Location: pfile:/data/users/njain/hive_commit1/hive_commit1/build/hbase-handler/test/data/warehouse/stats_src
+Location: pfile:/data/users/jsichi/open/hive-trunk/build/hbase-handler/test/data/warehouse/stats_src
 Table Type: MANAGED_TABLE
 Table Parameters:
-	numFiles 1
-	numPartitions 0
-	numRows 500
-	totalSize 5812
-	transient_lastDdlTime 1287376963
+	transient_lastDdlTime 1295995714
 # Storage Information
 SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -94,11 +90,11 @@
 POSTHOOK: Lineage: stats_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: stats_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table hbase_part partition(ds='2008-04-08', hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_part@ds=2008-04-08/hr=11
 PREHOOK: Output: default@hbase_part
 POSTHOOK: query: analyze table hbase_part partition(ds='2008-04-08', hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_part@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@hbase_part
 POSTHOOK: Lineage: hbase_part PARTITION(ds=2010-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -108,11 +104,11 @@
 POSTHOOK: Lineage: stats_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: stats_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table hbase_part partition(ds='2008-04-08', hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_part@ds=2008-04-08/hr=12
 PREHOOK: Output: default@hbase_part
 POSTHOOK: query: analyze table hbase_part partition(ds='2008-04-08', hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_part@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@hbase_part
 POSTHOOK: Lineage: hbase_part PARTITION(ds=2010-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -145,18 +141,14 @@
 # Detailed Table Information
 Database: default
 Owner: null
-CreateTime: Sun Oct 17 21:42:44 PDT 2010
+CreateTime: Tue Jan 25 14:49:11 PST 2011
 LastAccessTime: UNKNOWN
 Protect Mode: None
 Retention: 0
-Location: pfile:/data/users/njain/hive_commit1/hive_commit1/build/hbase-handler/test/data/warehouse/hbase_part
+Location: pfile:/data/users/jsichi/open/hive-trunk/build/hbase-handler/test/data/warehouse/hbase_part
 Table Type: MANAGED_TABLE
 Table Parameters:
-	numFiles 2
-	numPartitions 2
-	numRows 1000
-	totalSize 11624
-	transient_lastDdlTime 1287376983
+	transient_lastDdlTime 1295995751
 # Storage Information
 SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -193,15 +185,12 @@
 Partition Value: [2010-04-08, 11]
 Database: default
 Table: hbase_part
-CreateTime: Sun Oct 17 21:42:49 PDT 2010
+CreateTime: Tue Jan 25 14:49:26 PST 2011
 LastAccessTime: UNKNOWN
 Protect Mode: None
-Location: pfile:/data/users/njain/hive_commit1/hive_commit1/build/hbase-handler/test/data/warehouse/hbase_part/ds=2010-04-08/hr=11
+Location: pfile:/data/users/jsichi/open/hive-trunk/build/hbase-handler/test/data/warehouse/hbase_part/ds=2010-04-08/hr=11
 Partition Parameters:
-	numFiles 1
-	numRows 500
-	totalSize 5812
-	transient_lastDdlTime 1287376969
+	transient_lastDdlTime 1295995766
 # Storage Information
 SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -238,15 +227,12 @@
 Partition Value: [2010-04-08, 12]
 Database: default
 Table: hbase_part
-CreateTime: Sun Oct 17 21:42:54 PDT 2010
+CreateTime: Tue Jan 25 14:49:52 PST 2011
 LastAccessTime: UNKNOWN
 Protect Mode: None
-Location: pfile:/data/users/njain/hive_commit1/hive_commit1/build/hbase-handler/test/data/warehouse/hbase_part/ds=2010-04-08/hr=12
+Location: pfile:/data/users/jsichi/open/hive-trunk/build/hbase-handler/test/data/warehouse/hbase_part/ds=2010-04-08/hr=12
 Partition Parameters:
-	numFiles 1
-	numRows 500
-	totalSize 5812
-	transient_lastDdlTime 1287376974
+	transient_lastDdlTime 1295995792
 # Storage Information
 SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Index: hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
===================================================================
--- hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestSetup.java (revision 1065710)
+++ hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestSetup.java (working copy)
@@ -26,7 +26,7 @@
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hive.conf.HiveConf;
Index: hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java
===================================================================
--- hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java (revision 1065710)
+++ hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java (working copy)
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.hbase;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
@@ -84,6 +85,7 @@
     kvs.add(new KeyValue(rowKey, cfc, qualDouble, Bytes.toBytes("5.3")));
     kvs.add(new KeyValue(rowKey, cfa, qualString, Bytes.toBytes("Hadoop, HBase, and Hive")));
     kvs.add(new KeyValue(rowKey, cfb, qualBool, Bytes.toBytes("true")));
+    Collections.sort(kvs, KeyValue.COMPARATOR);
     Result r = new Result(kvs);
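
The Collections.sort call added above is needed because the newer HBase client's Result looks cells up by binary search and assumes its backing KeyValues are already in KeyValue.COMPARATOR order; a hand-built fixture added out of order can make getValue miss cells. A minimal sketch of the pattern follows; the class name and cell values are hypothetical and not part of this patch, only the sort-before-construct step is taken from it:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SortedResultExample {
      public static void main(String[] args) {
        List<KeyValue> kvs = new ArrayList<KeyValue>();
        // Cells deliberately added out of column-family order.
        kvs.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cfb"),
            Bytes.toBytes("q"), Bytes.toBytes("true")));
        kvs.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cfa"),
            Bytes.toBytes("q"), Bytes.toBytes("42")));
        // Sort into KeyValue.COMPARATOR order before wrapping the list in a Result,
        // mirroring the Collections.sort added in TestHBaseSerDe above.
        Collections.sort(kvs, KeyValue.COMPARATOR);
        Result r = new Result(kvs);
        // Lookups now behave as they would for a server-side scan result.
        System.out.println(Bytes.toString(
            r.getValue(Bytes.toBytes("cfa"), Bytes.toBytes("q"))));
      }
    }
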
Index: hbase-handler/src/test/queries/hbase_bulk.m
===================================================================
--- hbase-handler/src/test/queries/hbase_bulk.m (revision 1065710)
+++ hbase-handler/src/test/queries/hbase_bulk.m (working copy)
@@ -11,17 +11,16 @@
 -- this is a dummy table used for controlling how the input file
 -- for TotalOrderPartitioner is created
-create external table hbpartition(part_break string)
-row format serde
+create table hbpartition(part_break string)
+row format serde
 'org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe'
-stored as
-inputformat
+stored as
+inputformat
 'org.apache.hadoop.mapred.TextInputFormat'
-outputformat
-'org.apache.hadoop.hive.ql.io.HiveNullValueSequenceFileOutputFormat'
-location '/tmp/hbpartitions';
+outputformat
+'org.apache.hadoop.hive.ql.io.HiveNullValueSequenceFileOutputFormat';
--- this should produce one file in /tmp/hbpartitions, but we do not
+-- this should produce one file, but we do not
 -- know what it will be called, so we will copy it to a well known
 -- filename /tmp/hbpartition.lst
 insert overwrite table hbpartition
@@ -29,8 +28,8 @@
 from src
 where value='val_100' or value='val_200';
-dfs -count /tmp/hbpartitions;
-dfs -cp /tmp/hbpartitions/* /tmp/hbpartition.lst;
+dfs -count /build/ql/test/data/warehouse/hbpartition;
+dfs -cp /build/ql/test/data/warehouse/hbpartition/* /tmp/hbpartition.lst;
 set mapred.reduce.tasks=3;
 set hive.mapred.partitioner=org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
===================================================================
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java (revision 1065710)
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java (working copy)
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -72,6 +73,8 @@
       return admin;
     } catch (MasterNotRunningException mnre) {
       throw new MetaException(StringUtils.stringifyException(mnre));
+    } catch (ZooKeeperConnectionException zkce) {
+      throw new MetaException(StringUtils.stringifyException(zkce));
     }
   }
@@ -295,7 +298,7 @@
       // this better later when we support more interesting predicates.
       return null;
     }
-
+
     DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
     decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(
       searchConditions);
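
The extra catch above reflects that the 0.89-era HBaseAdmin constructor declares ZooKeeperConnectionException alongside MasterNotRunningException, and both are surfaced to the metastore layer as MetaException. A sketch of the resulting accessor is below; the lazy-init guard and the admin/hbaseConf field names are assumptions drawn from the surrounding context, and the imports are the ones shown in the hunk above plus MetaException and Hadoop's StringUtils:

    // Lazily creates the shared HBaseAdmin, converting both connection-time
    // failures into the checked MetaException that metastore hooks may throw.
    private HBaseAdmin getHBaseAdmin() throws MetaException {
      try {
        if (admin == null) {
          // In HBase 0.89 this constructor can also fail while locating the
          // ZooKeeper quorum, not just when the master is down.
          admin = new HBaseAdmin(hbaseConf);
        }
        return admin;
      } catch (MasterNotRunningException mnre) {
        throw new MetaException(StringUtils.stringifyException(mnre));
      } catch (ZooKeeperConnectionException zkce) {
        throw new MetaException(StringUtils.stringifyException(zkce));
      }
    }
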
Index: build.xml
===================================================================
--- build.xml (revision 1065710)
+++ build.xml (working copy)
@@ -53,16 +53,6 @@
-
-
-
-
-
-
-
-
-
-
@@ -121,7 +111,7 @@
-
+
@@ -132,7 +122,7 @@
-
+
@@ -143,7 +133,7 @@
-
+
@@ -658,5 +648,5 @@
-
+
Index: lib/hbase-0.20.3-test.jar
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lib/hbase-0.20.3.jar
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lib/zookeeper-3.2.2.jar
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: ivy/ivysettings.xml
===================================================================
--- ivy/ivysettings.xml (revision 1065710)
+++ ivy/ivysettings.xml (working copy)
@@ -55,17 +55,17 @@
-
+
-
+
-
+
@@ -74,9 +74,9 @@
-
+
-
+
Index: ivy/libraries.properties
===================================================================
--- ivy/libraries.properties (revision 1065710)
+++ ivy/libraries.properties (working copy)
@@ -35,8 +35,8 @@
 commons-logging.version=1.0.4
 commons-logging-api.version=1.0.4
 commons-pool.version=1.5.4
-hbase.version=0.20.3
-hbase-test.version=0.20.3
+hbase.version=0.89.0-SNAPSHOT
+hbase-test.version=0.89.0-SNAPSHOT
 jdo-api.version=2.3-ec
 jdom.version=1.1
 jline.version=0.9.94
@@ -47,4 +47,5 @@
 slf4j-log4j12.version=1.6.1
 thrift.version=0.5.0
 thrift-fb303.version=0.5.0
-zookeeper.version=3.2.2
+zookeeper.version=3.3.1
+guava.version=r06
Index: build-common.xml
===================================================================
--- build-common.xml (revision 1065710)
+++ build-common.xml (working copy)
@@ -105,7 +105,7 @@
-
+
@@ -272,7 +272,7 @@
-
+
+
@@ -6,5 +6,17 @@
+
+
+
+
+
+
+
+
Index: ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (revision 1065710)
+++ ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (working copy)
@@ -47,7 +47,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hive.cli.CliDriver;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -338,7 +338,7 @@
       }
     }
     db.setCurrentDatabase(DEFAULT_DATABASE_NAME);
-
+
     List roleNames = db.getAllRoleNames();
     for (String roleName : roleNames) {
       db.dropRole(roleName);
@@ -395,9 +395,9 @@
   }
   public void createSources() throws Exception {
-
+
     startSessionState();
-
+
     // Create a bunch of tables with columns key and value
     LinkedList cols = new LinkedList();
     cols.add("key");
@@ -499,7 +499,7 @@
     testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
     // conf.logVars(System.out);
     // System.out.flush();
-
+
     SessionState.start(conf);
     db = Hive.get(conf);
     fs = FileSystem.get(conf);
@@ -580,7 +580,7 @@
   private CliSessionState startSessionState()
     throws FileNotFoundException, UnsupportedEncodingException {
-
+
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
       "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
Index: ql/build.xml
===================================================================
--- ql/build.xml (revision 1065710)
+++ ql/build.xml (working copy)
@@ -148,7 +148,7 @@
-
+
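
Both HBaseTestSetup.java and QTestUtil.java above switch their import to the relocated org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster that ships with HBase 0.89. A minimal sketch of driving the relocated class from test setup code follows, assuming the 0.89-era startup(File)/shutdown() methods; the class name, scratch directory, and surrounding try/finally are illustrative only and not taken from this patch:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    // Note the new package: org.apache.hadoop.hbase.zookeeper, not org.apache.hadoop.hbase.
    import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

    public class MiniZkExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster();
        // startup() binds a client port under the given directory and returns it.
        int clientPort = zkCluster.startup(new File("/tmp/minizk-example"));
        conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));
        try {
          // ... start a MiniHBaseCluster against conf and run tests ...
        } finally {
          zkCluster.shutdown();
        }
      }
    }
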