diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
index 8eb007f..26dfcf0 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
@@ -105,8 +105,9 @@ public void setupConnection() throws IOException {
     // Turn off caching, as we want to test actual interaction with HBase
     conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
     conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
-    HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
-    hbase.setConnection(hconn);
+    HBaseReadWrite.setTestConnection(hconn);
+    /*HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
+    hbase.setConnection(hconn);*/
     store = new HBaseStore();
     store.setConf(conf);
   }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java
new file mode 100644
index 0000000..0b1537a
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Integration tests that run SQL statements against an HBase-backed metastore, using an
+ * HBase mini-cluster.
+ */
+public class TestHBaseMetastoreSql {
+
+  private static final Log LOG = LogFactory.getLog(TestHBaseMetastoreSql.class.getName());
+
+  private static HBaseTestingUtility utility;
+  private static HTableInterface tblTable;
+  private static HTableInterface sdTable;
+  private static HTableInterface partTable;
+  private static HTableInterface dbTable;
+  private static HTableInterface roleTable;
+  private static HTableInterface globalPrivsTable;
+  private static HTableInterface principalRoleMapTable;
+  private static Map<String, String> emptyParameters = new HashMap<String, String>();
+
+  @Rule public ExpectedException thrown = ExpectedException.none();
+  @Mock private HBaseConnection hconn;
+  private HBaseStore store;
+  private HiveConf conf;
+  private Driver driver;
+
+  @BeforeClass
+  public static void startMiniCluster() throws Exception {
+    utility = new HBaseTestingUtility();
+    utility.startMiniCluster();
+    byte[][] families = new byte[][] {HBaseReadWrite.CATALOG_CF, HBaseReadWrite.STATS_CF};
+    tblTable = utility.createTable(HBaseReadWrite.TABLE_TABLE.getBytes(HBaseUtils.ENCODING),
+        families);
+    sdTable = utility.createTable(HBaseReadWrite.SD_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+    partTable = utility.createTable(HBaseReadWrite.PART_TABLE.getBytes(HBaseUtils.ENCODING),
+        families);
+    dbTable = utility.createTable(HBaseReadWrite.DB_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+    roleTable = utility.createTable(HBaseReadWrite.ROLE_TABLE.getBytes(HBaseUtils.ENCODING),
+        HBaseReadWrite.CATALOG_CF);
+    globalPrivsTable =
+        utility.createTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE.getBytes(HBaseUtils.ENCODING),
+            HBaseReadWrite.CATALOG_CF);
+    principalRoleMapTable =
+        utility.createTable(HBaseReadWrite.USER_TO_ROLE_TABLE.getBytes(HBaseUtils.ENCODING),
+            HBaseReadWrite.CATALOG_CF);
+  }
+
+  @AfterClass
+  public static void shutdownMiniCluster() throws Exception {
+    utility.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setupConnection() throws IOException {
+    MockitoAnnotations.initMocks(this);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE)).thenReturn(globalPrivsTable);
+    Mockito.when(hconn.getHBaseTable(HBaseReadWrite.USER_TO_ROLE_TABLE)).thenReturn(principalRoleMapTable);
+    conf = new HiveConf();
+    conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
+    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
+        "org.apache.hadoop.hive.metastore.hbase.HBaseStore");
+    conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true);
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    HBaseReadWrite.setTestConnection(hconn);
+
+    SessionState.start(new CliSessionState(conf));
+    driver = new Driver(conf);
+  }
+
+  @Test
+  public void insertIntoTable() throws Exception {
+    driver.run("create table iit (c int)");
+    CommandProcessorResponse rsp = driver.run("insert into table iit values (3)");
+    Assert.assertEquals(0, rsp.getResponseCode());
+  }
+
+  @Test
+  public void insertIntoPartitionTable() throws Exception {
+    driver.run("create table iipt (c int) partitioned by (ds string)");
+    CommandProcessorResponse rsp =
+        driver.run("insert into table iipt partition(ds) values (1, 'today'), (2, 'yesterday')," +
+            "(3, 'tomorrow')");
+    Assert.assertEquals(0, rsp.getResponseCode());
+  }
+
+
+}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
index 24e8c65..b76fa78 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
@@ -138,8 +138,8 @@ public void setupConnection() throws IOException {
     // Turn off caching, as we want to test actual interaction with HBase
     conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
     conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
-    HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
-    hbase.setConnection(hconn);
+    HBaseReadWrite.setTestConnection(hconn);
+    // HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
     store = new HBaseStore();
     store.setConf(conf);
   }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
index 57c62ca..53e0c1a 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
@@ -113,8 +113,8 @@ public void setupConnection() throws IOException {
     // Turn off caching, as we want to test actual interaction with HBase
     conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
     conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
-    HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
-    hbase.setConnection(hconn);
+    HBaseReadWrite.setTestConnection(hconn);
+    // HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
     store = new HBaseStore();
     store.setConf(conf);
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 348cc30..5d3e9c2 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1879,7 +1879,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab

       firePreEvent(new PreAddPartitionEvent(tbl, part, this));

-      part.setSd(tbl.getSd());
+      part.setSd(tbl.getSd().deepCopy());
       partLocation = new Path(tbl.getSd().getLocation(), Warehouse
           .makePartName(tbl.getPartitionKeys(), part_vals));
       part.getSd().setLocation(partLocation.toString());
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index 1f336db..e80f876 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -85,6 +85,7 @@ private final static int TABLES_TO_CACHE = 10;

   @VisibleForTesting final static String TEST_CONN = "test_connection";
+  private static HBaseConnection testConn;

   private final static String[] tableNames = { DB_TABLE, GLOBAL_PRIVS_TABLE, PART_TABLE,
       USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, TABLE_TABLE };
@@ -153,7 +154,11 @@ private HBaseReadWrite(Configuration configuration) {
     try {
       String connClass = HiveConf.getVar(conf,
           HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS);
-      if (!TEST_CONN.equals(connClass)) {
+      if (TEST_CONN.equals(connClass)) {
+        conn = testConn;
+        LOG.debug("Using test connection.");
+      } else {
+        LOG.debug("Instantiating connection class " + connClass);
         Class c = Class.forName(connClass);
         Object o = c.newInstance();
         if (HBaseConnection.class.isAssignableFrom(o.getClass())) {
@@ -1629,8 +1634,8 @@ int countStorageDescriptor() throws IOException {
    * @param connection Mock connection object
    */
   @VisibleForTesting
-  void setConnection(HBaseConnection connection) {
-    conn = connection;
+  static void setTestConnection(HBaseConnection connection) {
+    testConn = connection;
   }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 7752cac..deebb9b 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index 3c7e35e..d30e2a0 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -190,11
+190,13 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ List results = new ArrayList(); for (HbaseMetastoreProto.PrivilegeGrantInfo proto : privileges) { PrivilegeGrantInfo pgi = new PrivilegeGrantInfo(); - pgi.setPrivilege(proto.getPrivilege()); + if (proto.hasPrivilege()) pgi.setPrivilege(proto.getPrivilege()); pgi.setCreateTime((int)proto.getCreateTime()); - pgi.setGrantor(proto.getGrantor()); - pgi.setGrantorType(convertPrincipalTypes(proto.getGrantorType())); - pgi.setGrantOption(proto.getGrantOption()); + if (proto.hasGrantor()) pgi.setGrantor(proto.getGrantor()); + if (proto.hasGrantorType()) { + pgi.setGrantorType(convertPrincipalTypes(proto.getGrantorType())); + } + if (proto.hasGrantOption()) pgi.setGrantOption(proto.getGrantOption()); results.add(pgi); } return results; @@ -272,7 +274,7 @@ static Role deserializeRole(String roleName, byte[] value) HbaseMetastoreProto.Role protoRole = HbaseMetastoreProto.Role.parseFrom(value); role.setCreateTime((int)protoRole.getCreateTime()); - role.setOwnerName(protoRole.getOwnerName()); + if (protoRole.hasOwnerName()) role.setOwnerName(protoRole.getOwnerName()); return role; } @@ -344,12 +346,14 @@ static Database deserializeDatabase(String dbName, byte[] value) db.setName(dbName); HbaseMetastoreProto.Database protoDb = HbaseMetastoreProto.Database.parseFrom(value); db.setName(dbName); - db.setDescription(protoDb.getDescription()); - db.setLocationUri(protoDb.getUri()); - db.setParameters(buildParameters(protoDb.getParameters())); - db.setPrivileges(buildPrincipalPrivilegeSet(protoDb.getPrivileges())); - db.setOwnerName(protoDb.getOwnerName()); - db.setOwnerType(convertPrincipalTypes(protoDb.getOwnerType())); + if (protoDb.hasDescription()) db.setDescription(protoDb.getDescription()); + if (protoDb.hasUri()) db.setLocationUri(protoDb.getUri()); + if (protoDb.hasParameters()) db.setParameters(buildParameters(protoDb.getParameters())); + if (protoDb.hasPrivileges()) { + db.setPrivileges(buildPrincipalPrivilegeSet(protoDb.getPrivileges())); + } + if (protoDb.hasOwnerName()) db.setOwnerName(protoDb.getOwnerName()); + if (protoDb.hasOwnerType()) db.setOwnerType(convertPrincipalTypes(protoDb.getOwnerType())); return db; } @@ -372,7 +376,8 @@ static Database deserializeDatabase(byte[] key, byte[] value) convertFieldSchemaListFromProto(List protoList) { List schemas = new ArrayList(protoList.size()); for (HbaseMetastoreProto.FieldSchema proto : protoList) { - schemas.add(new FieldSchema(proto.getName(), proto.getType(), proto.getComment())); + schemas.add(new FieldSchema(proto.getName(), proto.getType(), + proto.hasComment() ? 
proto.getComment() : null)); } return schemas; } @@ -557,35 +562,42 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) HbaseMetastoreProto.StorageDescriptor.parseFrom(serialized); StorageDescriptor sd = new StorageDescriptor(); sd.setCols(convertFieldSchemaListFromProto(proto.getColsList())); - sd.setInputFormat(proto.getInputFormat()); - sd.setOutputFormat(proto.getOutputFormat()); + if (proto.hasInputFormat()) sd.setInputFormat(proto.getInputFormat()); + if (proto.hasOutputFormat()) sd.setOutputFormat(proto.getOutputFormat()); sd.setCompressed(proto.getIsCompressed()); sd.setNumBuckets(proto.getNumBuckets()); - SerDeInfo serde = new SerDeInfo(); - serde.setName(proto.getSerdeInfo().getName()); - serde.setSerializationLib(proto.getSerdeInfo().getSerializationLib()); - serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters())); - sd.setSerdeInfo(serde); + if (proto.hasSerdeInfo()) { + SerDeInfo serde = new SerDeInfo(); + serde.setName(proto.getSerdeInfo().getName()); + serde.setSerializationLib(proto.getSerdeInfo().getSerializationLib()); + serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters())); + sd.setSerdeInfo(serde); + } sd.setBucketCols(new ArrayList(proto.getBucketColsList())); List sortCols = new ArrayList(); for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) { sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder())); } sd.setSortCols(sortCols); - SkewedInfo skewed = new SkewedInfo(); - skewed.setSkewedColNames(new ArrayList(proto.getSkewedInfo().getSkewedColNamesList())); - for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList innerList : - proto.getSkewedInfo().getSkewedColValuesList()) { - skewed.addToSkewedColValues(new ArrayList(innerList.getSkewedColValueList())); - } - Map, String> colMaps = new HashMap, String>(); - for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap map : - proto.getSkewedInfo().getSkewedColValueLocationMapsList()) { - colMaps.put(new ArrayList(map.getKeyList()), map.getValue()); - } - skewed.setSkewedColValueLocationMaps(colMaps); - sd.setSkewedInfo(skewed); - sd.setStoredAsSubDirectories(proto.getStoredAsSubDirectories()); + if (proto.hasSkewedInfo()) { + SkewedInfo skewed = new SkewedInfo(); + skewed + .setSkewedColNames(new ArrayList(proto.getSkewedInfo().getSkewedColNamesList())); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList innerList : + proto.getSkewedInfo().getSkewedColValuesList()) { + skewed.addToSkewedColValues(new ArrayList(innerList.getSkewedColValueList())); + } + Map, String> colMaps = new HashMap, String>(); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap map : + proto.getSkewedInfo().getSkewedColValueLocationMapsList()) { + colMaps.put(new ArrayList(map.getKeyList()), map.getValue()); + } + skewed.setSkewedColValueLocationMaps(colMaps); + sd.setSkewedInfo(skewed); + } + if (proto.hasStoredAsSubDirectories()) { + sd.setStoredAsSubDirectories(proto.getStoredAsSubDirectories()); + } return sd; } @@ -628,12 +640,14 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) } static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { - sd.setLocation(parts.location); - sd.setParameters(parts.parameters); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setLocation(parts.location); + ssd.setParameters(parts.parameters); + ssd.setShared(sd); if 
(parts.containingPartition != null) { - parts.containingPartition.setSd(sd); + parts.containingPartition.setSd(ssd); } else if (parts.containingTable != null) { - parts.containingTable.setSd(sd); + parts.containingTable.setSd(ssd); } else { throw new RuntimeException("Need either a partition or a table"); } @@ -674,10 +688,10 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa part.setValues(partVals); part.setCreateTime((int)proto.getCreateTime()); part.setLastAccessTime((int)proto.getLastAccessTime()); - sdParts.location = proto.getLocation(); - sdParts.parameters = buildParameters(proto.getSdParameters()); + if (proto.hasLocation()) sdParts.location = proto.getLocation(); + if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); sdParts.sdHash = proto.getSdHash().toByteArray(); - part.setParameters(buildParameters(proto.getParameters())); + if (proto.hasParameters()) part.setParameters(buildParameters(proto.getParameters())); return sdParts; } @@ -761,16 +775,18 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, table.setCreateTime((int)proto.getCreateTime()); table.setLastAccessTime((int)proto.getLastAccessTime()); table.setRetention((int)proto.getRetention()); - sdParts.location = proto.getLocation(); - sdParts.parameters = buildParameters(proto.getSdParameters()); + if (proto.hasLocation()) sdParts.location = proto.getLocation(); + if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); sdParts.sdHash = proto.getSdHash().toByteArray(); table.setPartitionKeys(convertFieldSchemaListFromProto(proto.getPartitionKeysList())); table.setParameters(buildParameters(proto.getParameters())); - table.setViewOriginalText(proto.getViewOriginalText()); - table.setViewExpandedText(proto.getViewExpandedText()); + if (proto.hasViewOriginalText()) table.setViewOriginalText(proto.getViewOriginalText()); + if (proto.hasViewExpandedText()) table.setViewExpandedText(proto.getViewExpandedText()); table.setTableType(proto.getTableType()); - table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); - table.setTemporary(proto.getIsTemporary()); + if (proto.hasPrivileges()) { + table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); + } + if (proto.hasIsTemporary()) table.setTemporary(proto.getIsTemporary()); return sdParts; } @@ -880,15 +896,23 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics stats, colData.setBooleanStats(boolData); } else if (proto.hasLongStats()) { LongColumnStatsData longData = new LongColumnStatsData(); - longData.setLowValue(proto.getLongStats().getLowValue()); - longData.setHighValue(proto.getLongStats().getHighValue()); + if (proto.getLongStats().hasLowValue()) { + longData.setLowValue(proto.getLongStats().getLowValue()); + } + if (proto.getLongStats().hasHighValue()) { + longData.setHighValue(proto.getLongStats().getHighValue()); + } longData.setNumNulls(proto.getNumNulls()); longData.setNumDVs(proto.getNumDistinctValues()); colData.setLongStats(longData); } else if (proto.hasDoubleStats()) { DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - doubleData.setLowValue(proto.getDoubleStats().getLowValue()); - doubleData.setHighValue(proto.getDoubleStats().getHighValue()); + if (proto.getDoubleStats().hasLowValue()) { + doubleData.setLowValue(proto.getDoubleStats().getLowValue()); + } + if (proto.getDoubleStats().hasHighValue()) { + 
doubleData.setHighValue(proto.getDoubleStats().getHighValue()); + } doubleData.setNumNulls(proto.getNumNulls()); doubleData.setNumDVs(proto.getNumDistinctValues()); colData.setDoubleStats(doubleData); @@ -907,14 +931,18 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics stats, colData.setBinaryStats(binaryData); } else if (proto.hasDecimalStats()) { DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); - Decimal hiVal = new Decimal(); - hiVal.setUnscaled(proto.getDecimalStats().getHighValue().getUnscaled().toByteArray()); - hiVal.setScale((short) proto.getDecimalStats().getHighValue().getScale()); - decimalData.setHighValue(hiVal); - Decimal loVal = new Decimal(); - loVal.setUnscaled(proto.getDecimalStats().getLowValue().getUnscaled().toByteArray()); - loVal.setScale((short) proto.getDecimalStats().getLowValue().getScale()); - decimalData.setLowValue(loVal); + if (proto.getDecimalStats().hasHighValue()) { + Decimal hiVal = new Decimal(); + hiVal.setUnscaled(proto.getDecimalStats().getHighValue().getUnscaled().toByteArray()); + hiVal.setScale((short) proto.getDecimalStats().getHighValue().getScale()); + decimalData.setHighValue(hiVal); + } + if (proto.getDecimalStats().hasLowValue()) { + Decimal loVal = new Decimal(); + loVal.setUnscaled(proto.getDecimalStats().getLowValue().getUnscaled().toByteArray()); + loVal.setScale((short) proto.getDecimalStats().getLowValue().getScale()); + decimalData.setLowValue(loVal); + } decimalData.setNumNulls(proto.getNumNulls()); decimalData.setNumDVs(proto.getNumDistinctValues()); colData.setDecimalStats(decimalData); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java index 12fea80..f5e9168 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedStorageDescriptor.java @@ -26,7 +26,8 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -38,7 +39,11 @@ */ public class SharedStorageDescriptor extends StorageDescriptor { static final private Log LOG = LogFactory.getLog(SharedStorageDescriptor.class.getName()); - StorageDescriptor shared; + private StorageDescriptor shared; + private boolean copied = false; + private CopyOnWriteColList colList = null; + private CopyOnWriteOrderList orderList = null; + private CopyOnWriteBucketList bucketList = null; SharedStorageDescriptor() { } @@ -49,13 +54,20 @@ public SharedStorageDescriptor(SharedStorageDescriptor that) { this.shared = that.shared; } - void readShared(byte[] hash) throws IOException { - shared = HBaseReadWrite.getInstance().getStorageDescriptor(hash); + @Override + public StorageDescriptor deepCopy() { + return new SharedStorageDescriptor(this); + } + + @Override + public boolean isSetCols() { + return shared.isSetCols(); } @Override public List getCols() { - return shared.getCols(); + return copied ? shared.getCols() : ( + shared.getCols() == null ? 
null : copyCols(shared.getCols())); } @Override @@ -69,33 +81,143 @@ public int getColsSize() { } @Override + public void setCols(List cols) { + copyOnWrite(); + shared.setCols(cols); + } + + @Override + public void addToCols(FieldSchema fs) { + copyOnWrite(); + shared.addToCols(fs); + } + + @Override + public void unsetCols() { + copyOnWrite(); + shared.unsetCols(); + } + + @Override + public boolean isSetInputFormat() { + return shared.isSetInputFormat(); + } + + @Override public String getInputFormat() { return shared.getInputFormat(); } @Override + public void setInputFormat(String inputFormat) { + copyOnWrite(); + shared.setInputFormat(inputFormat); + } + + @Override + public void unsetInputFormat() { + copyOnWrite(); + shared.unsetInputFormat(); + } + + @Override + public boolean isSetOutputFormat() { + return shared.isSetOutputFormat(); + } + + @Override public String getOutputFormat() { return shared.getOutputFormat(); } @Override + public void setOutputFormat(String outputFormat) { + copyOnWrite(); + shared.setOutputFormat(outputFormat); + } + + @Override + public void unsetOutputFormat() { + copyOnWrite(); + shared.unsetOutputFormat(); + } + + @Override + public boolean isSetCompressed() { + return shared.isSetCompressed(); + } + + @Override public boolean isCompressed() { return shared.isCompressed(); } @Override + public void setCompressed(boolean isCompressed) { + copyOnWrite(); + shared.setCompressed(isCompressed); + } + + @Override + public void unsetCompressed() { + copyOnWrite(); + shared.unsetCompressed(); + } + + @Override + public boolean isSetNumBuckets() { + return shared.isSetNumBuckets(); + } + + @Override public int getNumBuckets() { return shared.getNumBuckets(); } @Override + public void setNumBuckets(int numBuckets) { + copyOnWrite(); + shared.setNumBuckets(numBuckets); + } + + @Override + public void unsetNumBuckets() { + copyOnWrite(); + shared.unsetNumBuckets(); + } + + @Override + public boolean isSetSerdeInfo() { + return shared.isSetSerdeInfo(); + } + + @Override public SerDeInfo getSerdeInfo() { - return shared.getSerdeInfo(); + return copied ? shared.getSerdeInfo() : ( + shared.getSerdeInfo() == null ? null : new SerDeInfoWrapper(shared.getSerdeInfo())); + } + + @Override + public void setSerdeInfo(SerDeInfo serdeInfo) { + copyOnWrite(); + shared.setSerdeInfo(serdeInfo); + } + + @Override + public void unsetSerdeInfo() { + copyOnWrite(); + shared.unsetSerdeInfo(); + } + + @Override + public boolean isSetBucketCols() { + return shared.isSetBucketCols(); } @Override public List getBucketCols() { - return shared.getBucketCols(); + return copied ? shared.getBucketCols() : ( + shared.getBucketCols() == null ? null : copyBucketCols(shared.getBucketCols())); } @Override @@ -109,8 +231,32 @@ public int getBucketColsSize() { } @Override + public void setBucketCols(List bucketCols) { + copyOnWrite(); + shared.setBucketCols(bucketCols); + } + + @Override + public void addToBucketCols(String bucketCol) { + copyOnWrite(); + shared.addToBucketCols(bucketCol); + } + + @Override + public void unsetBucketCols() { + copyOnWrite(); + shared.unsetBucketCols(); + } + + @Override + public boolean isSetSortCols() { + return shared.isSetSortCols(); + } + + @Override public List getSortCols() { - return shared.getSortCols(); + return copied ? shared.getSortCols() : ( + shared.getSortCols() == null ? 
null : copySort(shared.getSortCols())); } @Override @@ -124,12 +270,508 @@ public int getSortColsSize() { } @Override + public void setSortCols(List sortCols) { + copyOnWrite(); + shared.setSortCols(sortCols); + } + + @Override + public void addToSortCols(Order sortCol) { + copyOnWrite(); + shared.addToSortCols(sortCol); + } + + @Override + public void unsetSortCols() { + copyOnWrite(); + shared.unsetSortCols(); + } + + @Override + public boolean isSetSkewedInfo() { + return shared.isSetSkewedInfo(); + } + + @Override public SkewedInfo getSkewedInfo() { - return shared.getSkewedInfo(); + return copied ? shared.getSkewedInfo() : ( + shared.getSkewedInfo() == null ? null : new SkewWrapper(shared.getSkewedInfo())); + } + + @Override + public void setSkewedInfo(SkewedInfo skewedInfo) { + copyOnWrite(); + shared.setSkewedInfo(skewedInfo); + } + + @Override + public void unsetSkewedInfo() { + copyOnWrite(); + shared.unsetSkewedInfo(); + } + + @Override + public boolean isSetStoredAsSubDirectories() { + return shared.isSetStoredAsSubDirectories(); } @Override public boolean isStoredAsSubDirectories() { return shared.isStoredAsSubDirectories(); } + + @Override + public void setStoredAsSubDirectories(boolean sasd) { + copyOnWrite(); + shared.setStoredAsSubDirectories(sasd); + } + + @Override + public void unsetStoredAsSubDirectories() { + copyOnWrite(); + shared.unsetStoredAsSubDirectories(); + } + + void setShared(StorageDescriptor sd) { + shared = sd; + } + + StorageDescriptor getShared() { + return shared; + } + + private void copyOnWrite() { + if (!copied) { + shared = new StorageDescriptor(shared); + copied = true; + } + } + + private class SerDeInfoWrapper extends SerDeInfo { + + SerDeInfoWrapper(SerDeInfo serde) { + super(serde); + } + + @Override + public void setName(String name) { + copyOnWrite(); + shared.getSerdeInfo().setName(name); + } + + @Override + public void unsetName() { + copyOnWrite(); + shared.getSerdeInfo().unsetName(); + } + + @Override + public void setSerializationLib(String lib) { + copyOnWrite(); + shared.getSerdeInfo().setSerializationLib(lib); + } + + @Override + public void unsetSerializationLib() { + copyOnWrite(); + shared.getSerdeInfo().unsetSerializationLib(); + } + + @Override + public void setParameters(Map parameters) { + copyOnWrite(); + shared.getSerdeInfo().setParameters(parameters); + } + + @Override + public void unsetParameters() { + copyOnWrite(); + shared.getSerdeInfo().unsetParameters(); + } + + @Override + public void putToParameters(String key, String value) { + copyOnWrite(); + shared.getSerdeInfo().putToParameters(key, value); + } + } + + private class SkewWrapper extends SkewedInfo { + SkewWrapper(SkewedInfo skew) { + super(skew); + } + + @Override + public void setSkewedColNames(List skewedColNames) { + copyOnWrite(); + shared.getSkewedInfo().setSkewedColNames(skewedColNames); + } + + @Override + public void unsetSkewedColNames() { + copyOnWrite(); + shared.getSkewedInfo().unsetSkewedColNames(); + } + + @Override + public void addToSkewedColNames(String skewCol) { + copyOnWrite(); + shared.getSkewedInfo().addToSkewedColNames(skewCol); + } + + @Override + public void setSkewedColValues(List> skewedColValues) { + copyOnWrite(); + shared.getSkewedInfo().setSkewedColValues(skewedColValues); + } + + @Override + public void unsetSkewedColValues() { + copyOnWrite(); + shared.getSkewedInfo().unsetSkewedColValues(); + } + + @Override + public void addToSkewedColValues(List skewedColValue) { + copyOnWrite(); + 
shared.getSkewedInfo().addToSkewedColValues(skewedColValue); + } + + @Override + public void setSkewedColValueLocationMaps(Map, String> maps) { + copyOnWrite(); + shared.getSkewedInfo().setSkewedColValueLocationMaps(maps); + } + + @Override + public void unsetSkewedColValueLocationMaps() { + copyOnWrite(); + shared.getSkewedInfo().unsetSkewedColValueLocationMaps(); + } + + @Override + public void putToSkewedColValueLocationMaps(List key, String value) { + copyOnWrite(); + shared.getSkewedInfo().putToSkewedColValueLocationMaps(key, value); + } + } + + private CopyOnWriteOrderList copySort(List sort) { + if (orderList == null) { + orderList = new CopyOnWriteOrderList(sort.size()); + for (int i = 0; i < sort.size(); i++) { + orderList.secretAdd(new OrderWrapper(i, sort.get(i))); + } + } + return orderList; + } + + private class CopyOnWriteOrderList extends ArrayList { + + CopyOnWriteOrderList(int size) { + super(size); + } + + private void secretAdd(OrderWrapper order) { + super.add(order); + } + + @Override + public boolean add(Order t) { + copyOnWrite(); + return shared.getSortCols().add(t); + } + + @Override + public boolean remove(Object o) { + copyOnWrite(); + return shared.getSortCols().remove(o); + } + + @Override + public boolean addAll(Collection c) { + copyOnWrite(); + return shared.getSortCols().addAll(c); + } + + @Override + public boolean addAll(int index, Collection c) { + copyOnWrite(); + return shared.getSortCols().addAll(c); + } + + @Override + public boolean removeAll(Collection c) { + copyOnWrite(); + return shared.getSortCols().removeAll(c); + } + + @Override + public boolean retainAll(Collection c) { + copyOnWrite(); + return shared.getSortCols().retainAll(c); + } + + @Override + public void clear() { + copyOnWrite(); + shared.getSortCols().clear(); + } + + @Override + public Order set(int index, Order element) { + copyOnWrite(); + return shared.getSortCols().set(index, element); + } + + @Override + public void add(int index, Order element) { + copyOnWrite(); + shared.getSortCols().add(index, element); + } + + @Override + public Order remove(int index) { + copyOnWrite(); + return shared.getSortCols().remove(index); + } + } + + private class OrderWrapper extends Order { + final private int pos; + + OrderWrapper(int pos, Order order) { + super(order); + this.pos = pos; + } + + @Override + public void setCol(String col) { + copyOnWrite(); + shared.getSortCols().get(pos).setCol(col); + } + + @Override + public void unsetCol() { + copyOnWrite(); + shared.getSortCols().get(pos).unsetCol(); + } + + @Override + public void setOrder(int order) { + copyOnWrite(); + shared.getSortCols().get(pos).setOrder(order); + } + + @Override + public void unsetOrder() { + copyOnWrite(); + shared.getSortCols().get(pos).unsetOrder(); + } + } + + private CopyOnWriteColList copyCols(List cols) { + if (colList == null) { + colList = new CopyOnWriteColList(cols.size()); + for (int i = 0; i < cols.size(); i++) { + colList.secretAdd(new FieldSchemaWrapper(i, cols.get(i))); + } + } + return colList; + } + + private class CopyOnWriteColList extends ArrayList { + + CopyOnWriteColList(int size) { + super(size); + } + + private void secretAdd(FieldSchemaWrapper col) { + super.add(col); + } + + @Override + public boolean add(FieldSchema t) { + copyOnWrite(); + return shared.getCols().add(t); + } + + @Override + public boolean remove(Object o) { + copyOnWrite(); + return shared.getCols().remove(o); + } + + @Override + public boolean addAll(Collection c) { + copyOnWrite(); + return shared.getCols().addAll(c); 
+ } + + @Override + public boolean addAll(int index, Collection c) { + copyOnWrite(); + return shared.getCols().addAll(c); + } + + @Override + public boolean removeAll(Collection c) { + copyOnWrite(); + return shared.getCols().removeAll(c); + } + + @Override + public boolean retainAll(Collection c) { + copyOnWrite(); + return shared.getCols().retainAll(c); + } + + @Override + public void clear() { + copyOnWrite(); + shared.getCols().clear(); + } + + @Override + public FieldSchema set(int index, FieldSchema element) { + copyOnWrite(); + return shared.getCols().set(index, element); + } + + @Override + public void add(int index, FieldSchema element) { + copyOnWrite(); + shared.getCols().add(index, element); + } + + @Override + public FieldSchema remove(int index) { + copyOnWrite(); + return shared.getCols().remove(index); + } + } + + private class FieldSchemaWrapper extends FieldSchema { + final private int pos; + + FieldSchemaWrapper(int pos, FieldSchema col) { + super(col); + this.pos = pos; + } + + @Override + public void setName(String name) { + copyOnWrite(); + shared.getCols().get(pos).setName(name); + } + + @Override + public void unsetName() { + copyOnWrite(); + shared.getCols().get(pos).unsetName(); + } + + @Override + public void setType(String type) { + copyOnWrite(); + shared.getCols().get(pos).setType(type); + } + + @Override + public void unsetType() { + copyOnWrite(); + shared.getCols().get(pos).unsetType(); + } + + @Override + public void setComment(String comment) { + copyOnWrite(); + shared.getCols().get(pos).setComment(comment); + } + + @Override + public void unsetComment() { + copyOnWrite(); + shared.getCols().get(pos).unsetComment(); + } + } + + private CopyOnWriteBucketList copyBucketCols(List cols) { + if (bucketList == null) { + bucketList = new CopyOnWriteBucketList(cols); + } + return bucketList; + } + + private class CopyOnWriteBucketList extends ArrayList { + + CopyOnWriteBucketList(Collection c) { + super(c); + } + + private void secretAdd(String col) { + super.add(col); + } + + @Override + public boolean add(String t) { + copyOnWrite(); + return shared.getBucketCols().add(t); + } + + @Override + public boolean remove(Object o) { + copyOnWrite(); + return shared.getBucketCols().remove(o); + } + + @Override + public boolean addAll(Collection c) { + copyOnWrite(); + return shared.getBucketCols().addAll(c); + } + + @Override + public boolean addAll(int index, Collection c) { + copyOnWrite(); + return shared.getBucketCols().addAll(c); + } + + @Override + public boolean removeAll(Collection c) { + copyOnWrite(); + return shared.getBucketCols().removeAll(c); + } + + @Override + public boolean retainAll(Collection c) { + copyOnWrite(); + return shared.getBucketCols().retainAll(c); + } + + @Override + public void clear() { + copyOnWrite(); + shared.getBucketCols().clear(); + } + + @Override + public String set(int index, String element) { + copyOnWrite(); + return shared.getBucketCols().set(index, element); + } + + @Override + public void add(int index, String element) { + copyOnWrite(); + shared.getBucketCols().add(index, element); + } + + @Override + public String remove(int index) { + copyOnWrite(); + return shared.getBucketCols().remove(index); + } + } + } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java index bc99633..c5c5b83 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java +++ 
metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
@@ -146,8 +146,8 @@ public Boolean answer(InvocationOnMock invocation) throws Throwable {
     HBaseConnection hconn = Mockito.mock(HBaseConnection.class);
     Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable);
     HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
+    HBaseReadWrite.setTestConnection(hconn);
     HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf);
-    hbase.setConnection(hconn);
     HBaseStore store = new HBaseStore();
     store.setConf(conf);
     return store;
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
new file mode 100644
index 0000000..c9d1eef
--- /dev/null
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+
+
+/**
+ * Unit tests for the copy-on-write behavior of SharedStorageDescriptor.
+ */
+public class TestSharedStorageDescriptor {
+  private static final Log LOG = LogFactory.getLog(TestSharedStorageDescriptor.class.getName());
+
+
+  @Test
+  public void location() {
+    StorageDescriptor sd = new StorageDescriptor();
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setLocation("here");
+    ssd.setShared(sd);
+    ssd.setLocation("there");
+    Assert.assertTrue(sd == ssd.getShared());
+  }
+
+  @Test
+  public void changeOnInputFormat() {
+    StorageDescriptor sd = new StorageDescriptor();
+    sd.setInputFormat("input");
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setShared(sd);
+    Assert.assertEquals("input", ssd.getInputFormat());
+    ssd.setInputFormat("different");
+    Assert.assertFalse(sd == ssd.getShared());
+    Assert.assertEquals("input", sd.getInputFormat());
+    Assert.assertEquals("different", ssd.getInputFormat());
+    Assert.assertEquals("input", sd.getInputFormat());
+  }
+
+  @Test
+  public void changeOnSerde() {
+    StorageDescriptor sd = new StorageDescriptor();
+    SerDeInfo serde = new SerDeInfo();
+    serde.setName("serde");
+    sd.setSerdeInfo(serde);
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setShared(sd);
+    Assert.assertEquals("serde", ssd.getSerdeInfo().getName());
+    ssd.getSerdeInfo().setName("different");
+    Assert.assertFalse(sd == ssd.getShared());
+    Assert.assertEquals("serde", serde.getName());
+    Assert.assertEquals("different", ssd.getSerdeInfo().getName());
+    Assert.assertEquals("serde", sd.getSerdeInfo().getName());
+  }
+
+  @Test
+  public void multipleChangesDontCauseMultipleCopies() {
+    StorageDescriptor sd = new StorageDescriptor();
+    sd.setInputFormat("input");
+    sd.setOutputFormat("output");
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setShared(sd);
+    Assert.assertEquals("input", ssd.getInputFormat());
+    ssd.setInputFormat("different");
+    Assert.assertFalse(sd == ssd.getShared());
+    Assert.assertEquals("input", sd.getInputFormat());
+    Assert.assertEquals("different", ssd.getInputFormat());
+    StorageDescriptor keep = ssd.getShared();
+    ssd.setOutputFormat("different_output");
+    Assert.assertEquals("different", ssd.getInputFormat());
+    Assert.assertEquals("different_output", ssd.getOutputFormat());
+    Assert.assertEquals("output", sd.getOutputFormat());
+    Assert.assertTrue(keep == ssd.getShared());
+  }
+
+  @Test
+  public void changeOrder() {
+    StorageDescriptor sd = new StorageDescriptor();
+    sd.addToSortCols(new Order("fred", 1));
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setShared(sd);
+    Assert.assertEquals(1, ssd.getSortCols().get(0).getOrder());
+    ssd.getSortCols().get(0).setOrder(2);
+    Assert.assertFalse(sd == ssd.getShared());
+    Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder());
+    Assert.assertEquals(1, sd.getSortCols().get(0).getOrder());
+  }
+
+  @Test
+  public void changeOrderList() {
+    StorageDescriptor sd = new StorageDescriptor();
+    sd.addToSortCols(new Order("fred", 1));
+    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
+    ssd.setShared(sd);
+    Assert.assertEquals(1, ssd.getSortCols().get(0).getOrder());
+    List<Order> list = ssd.getSortCols();
+    list.add(new Order("bob", 2));
+    Assert.assertFalse(sd == ssd.getShared());
+    Assert.assertEquals(2, ssd.getSortColsSize());
+    Assert.assertEquals(1, sd.getSortColsSize());
+  }
+
+}
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java
index bc24daa..fb6e573 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java
@@ -101,8 +101,8 @@ public Result answer(InvocationOnMock invocation) throws Throwable {
     HiveConf conf = new HiveConf();
     conf.setIntVar(HiveConf.ConfVars.METASTORE_HBASE_CACHE_SIZE, 30);
     conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
+    HBaseReadWrite.setTestConnection(hconn);
     hrw = HBaseReadWrite.getInstance(conf);
-    hrw.setConnection(hconn);
     StatsCache.getInstance(conf).clear();
     puts[0] = puts[1] = null;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 8331a49..601cf0c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -39,17 +39,12 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.hbase.SharedStorageDescriptor;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.transport.TMemoryBuffer;

 /**
  * A Hive Table Partition: is a fundamental storage unit within a Table.
@@ -96,7 +91,7 @@ public Partition(Table tbl) throws HiveException {
     org.apache.hadoop.hive.metastore.api.Partition tPart =
         new org.apache.hadoop.hive.metastore.api.Partition();
     if (!tbl.isView()) {
-      tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy
+      tPart.setSd(tbl.getTTable().getSd().deepCopy());
     }
     initialize(tbl, tPart);
   }
@@ -141,37 +136,13 @@ public Partition(Table tbl, Map<String, String> partSpec, Path location) throws
     tpart.setValues(pvals);

     if (!tbl.isView()) {
-      tpart.setSd(cloneSd(tbl));
+      tpart.setSd(tbl.getSd().deepCopy());
       tpart.getSd().setLocation((location != null) ? location.toString() : null);
     }

     return tpart;
   }

   /**
-   * We already have methods that clone stuff using XML or Kryo.
-   * And now for something completely different - let's clone SD using Thrift!
-   * Refactored into a method.
-   */
-  public static StorageDescriptor cloneSd(Table tbl) throws HiveException {
-    if (tbl.getSd() instanceof SharedStorageDescriptor) {
-      return new SharedStorageDescriptor((SharedStorageDescriptor)tbl.getSd());
-    }
-    // What is the point of this? Why not just use the copy constructor in StorageDescriptor?
- StorageDescriptor sd = new StorageDescriptor(); - try { - // replace with THRIFT-138 - TMemoryBuffer buffer = new TMemoryBuffer(1024); - TBinaryProtocol prot = new TBinaryProtocol(buffer); - tbl.getTTable().getSd().write(prot); - sd.read(prot); - } catch (TException e) { - LOG.error("Could not create a copy of StorageDescription"); - throw new HiveException("Could not create a copy of StorageDescription",e); - } - return sd; - } - - /** * Initializes this object with the given variables * * @param table diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index fc9d0bd..5f70010 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -415,7 +415,7 @@ public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart, // location or an SD, but these are needed to create a ql.metadata.Partition, // so we use the table's SD. The only place this is used is by the // authorization hooks, so we will not affect code flow in the metastore itself. - wrapperApiPart.setSd(t.getSd()); + wrapperApiPart.setSd(t.getSd().deepCopy()); } initialize(new TableWrapper(t),wrapperApiPart); } diff --git ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java index c0fd4b3..5a271e5 100644 --- ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java +++ ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java @@ -257,7 +257,7 @@ private void addPartition(HiveMetaStoreClient client, Table table, part.setTableName(table.getTableName()); part.setValues(vals); part.setParameters(new HashMap()); - part.setSd(table.getSd()); + part.setSd(table.getSd().deepCopy()); part.getSd().setSerdeInfo(table.getSd().getSerdeInfo()); part.getSd().setLocation(table.getSd().getLocation() + location);